import os
import zipfile

import numpy as np
import pandas as pd
from huggingface_hub import InferenceClient

from evaluations import documentation, requirements, training, validating, license, weights
from evaluations.utils import *

API_URL = "https://api-inference.huggingface.co/models/openlm-research/open_llama_3b_v2"
headers = {"Authorization": "Bearer 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", "x-wait-for-model": "true"}

client = InferenceClient(
    "meta-llama/Llama-3.1-8B-Instruct",
    token="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",
)

def evaluate(llm, verbose, repo_url, title=None, year=None):
    repository_zip_name = "data/repo.zip"
    token = os.getenv("githubToken")

    # Initialise the LLM helper only when requested.
    if llm:
        init_llm(verbose)
    else:
        log(verbose, "LOG", "No LLM will be used for the evaluation.")

    results = {
        "pred_live": "Yes",
        "pred_dependencies": None,
        "pred_training": None,
        "pred_evaluation": None,
        "pred_weights": None,
        "pred_readme": None,
        "pred_license": None,
        "pred_stars": None,
        "pred_citations": None,
        "pred_valid": False,
    }

    try:
        # A repository URL is only considered valid if it maps to a GitHub API link.
        if get_api_link(repo_url) != "":
            results["pred_valid"] = True
        else:
            return results

        username, repo_name = decompose_url(repo_url)
        log(verbose, "LOG", f"Fetching github repository: https://github.com/{username}/{repo_name}")
        fetch_repo(verbose, repo_url, repository_zip_name, token)

        # Fetch the citation count from OpenAlex when a paper title and year are provided.
        if title is not None and year is not None and title != "" and year != "":
            res = fetch_openalex(verbose, title, year)
            if res is not None:
                res = res["results"]
                if len(res) > 0:
                    res = res[0]
                    results["pred_citations"] = res["cited_by_count"]

        # If no archive was downloaded, the repository is treated as unreachable.
        if not os.path.exists(repository_zip_name):
            results["pred_live"] = "No"
            return results
        repo_zip = zipfile.ZipFile(repository_zip_name)
        readme = fetch_readme(repo_zip)
        results["pred_stars"] = fetch_repo_stars(verbose, repo_url, token)

        if len(repo_zip.namelist()) <= 2:
            # An archive containing only the top-level folder is treated as empty.
            log(verbose, "LOG", "Empty repository")
            results["pred_live"] = "No"
            results["pred_training"] = "No"
            results["pred_evaluation"] = "No"
            results["pred_weights"] = "No"
            results["pred_dependencies"] = "No"
        else:
            # Run each reproducibility check on the repository contents and its README.
            results["pred_dependencies"] = requirements.evaluate(verbose, llm, repo_zip, readme)
            results["pred_training"] = training.evaluate(verbose, llm, repo_zip, readme)
            results["pred_evaluation"] = validating.evaluate(verbose, llm, repo_zip, readme)
            results["pred_weights"] = weights.evaluate(verbose, llm, repo_zip, readme)
            results["pred_readme"] = documentation.evaluate(verbose, llm, repo_zip, readme)
            results["pred_codetocomment"] = documentation.get_code_to_comment_ratio(repo_zip)
            results["pred_license"] = license.evaluate(verbose, llm, repo_zip, readme)
        return results
    except Exception as e:
        log(verbose, "ERROR", "Evaluating repository failed: " + str(e))
        results["pred_live"] = "No"
        return results
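
# Usage sketch for a single repository (illustrative only; the URL below is a
# placeholder and "githubToken" is assumed to be set in the environment):
#
#     result = evaluate(llm=None, verbose=1, repo_url="https://github.com/user/repo")
#     print(result["pred_valid"], result["pred_dependencies"], result["pred_license"])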

def full_evaluation():
    paper_dump = pd.read_csv("data/dump.csv", sep="\t")
    full_results = []
    for idx, row in paper_dump.iterrows():
        # Skip papers without a repository URL.
        if pd.isna(row["url"]) or row["url"] == "":
            continue
        print(f"{int(100 * idx / paper_dump['title'].count())}% done")
        result = evaluate(None, False, row["url"], row["title"], row["year"])
        for column, value in result.items():
            row[column] = value
        full_results.append(row)
    return pd.DataFrame(full_results)

def midl_evaluations():
    compare_to_gt = True
    paper_dump = pd.read_csv("data/dump.csv", sep="\t")
    verbose = 1
    eval_readme = []
    eval_training = []
    eval_evaluating = []
    eval_licensing = []
    eval_weights = []
    eval_dependencies = []
    full_results = []
    for idx, row in paper_dump.iterrows():
        # Only evaluate MIDL papers.
        if row["venue"] != "MIDL":
            continue
if (row["venue"] == 2024): | |
continue | |
        if pd.isna(row["url"]) or row["url"] == "":
            continue
        print(f"\nEvaluating {idx + 1} out of {len(paper_dump.index)} papers...")
        print(f'Paper title - "{row["title"]}" ({row["year"]})')
        print(f'Repository link - {row["url"]}')
        result = evaluate(None, verbose, row["url"])
        for column, value in result.items():
            row[column] = value
        full_results.append(row)
        # Compare each prediction against the manually annotated ground truth and
        # report the running accuracy per axis.
        if compare_to_gt:
            print("\nSummary:")
            if pd.notna(row["dependencies"]) and row["pred_dependencies"] is not None:
                eval_dependencies.append(row["pred_dependencies"] == row["dependencies"])
                print(f"Dependencies acc. - {row['pred_dependencies']} (GT:{row['dependencies']}) / {int(100 * np.mean(eval_dependencies))}%")
            if pd.notna(row["training"]) and row["pred_training"] is not None:
                eval_training.append(row["training"] == row["pred_training"])
                print(f"Training acc. - {row['pred_training']} (GT:{row['training']}) / {int(100 * np.mean(eval_training))}%")
            if pd.notna(row["evaluation"]) and row["pred_evaluation"] is not None:
                eval_evaluating.append(row["evaluation"] == row["pred_evaluation"])
                print(f"Evaluating acc. - {row['pred_evaluation']} (GT:{row['evaluation']}) / {int(100 * np.mean(eval_evaluating))}%")
            if pd.notna(row["weights"]) and row["pred_weights"] is not None:
                eval_weights.append(row["weights"] == row["pred_weights"])
                print(f"Weights acc. - {row['pred_weights']} (GT:{row['weights']}) / {int(100 * np.mean(eval_weights))}%")
            if pd.notna(row["readme"]) and row["pred_readme"] is not None:
                eval_readme.append(row["readme"] == row["pred_readme"])
                print(f"README acc. - {row['pred_readme']} (GT:{row['readme']}) / {int(100 * np.mean(eval_readme))}%")
            if pd.notna(row["license"]) and row["pred_license"] is not None:
                # Ground-truth licenses are recorded by name, so collapse them to Yes/No before comparing.
                eval_licensing.append(("No" if row["license"] == "No" else "Yes") == row["pred_license"])
                print(f"LICENSE acc. - {row['pred_license']} (GT:{row['license']}) / {int(100 * np.mean(eval_licensing))}%")
    return pd.DataFrame(full_results)
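
# Minimal driver sketch (not part of the original module). It assumes the same
# "data/dump.csv" input used above; the "data/results.csv" output path is illustrative.
if __name__ == "__main__":
    results_df = full_evaluation()
    # Persist the per-paper predictions alongside the input dump.
    results_df.to_csv("data/results.csv", sep="\t", index=False)
    print(f"Evaluated {len(results_df.index)} repositories.")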