from fastapi import FastAPI, UploadFile, File
import requests
from transformers import pipeline
from sentence_transformers import CrossEncoder
import pandas as pd
import os

app = FastAPI()

# AI-generated-text classifier and sentence-pair similarity model
ai_detector = pipeline("text-classification", model="roberta-base-openai-detector")
text_model = CrossEncoder("cross-encoder/stsb-roberta-large")

TEXT_DB = "text_plagiarism.csv"

def load_database():
    # Create the CSV store on first use, then return it as a DataFrame
    if not os.path.exists(TEXT_DB):
        pd.DataFrame(columns=["content", "plagiarism_score"]).to_csv(TEXT_DB, index=False)
    return pd.read_csv(TEXT_DB)

def save_to_database(content, plagiarism_score):
    # Append the new submission and its score to the CSV store
    df = load_database()
    new_entry = pd.DataFrame({"content": [content], "plagiarism_score": [plagiarism_score]})
    df = pd.concat([df, new_entry], ignore_index=True)
    df.to_csv(TEXT_DB, index=False)

def check_text(text: str):
    # Score the submission against every stored text and keep the highest similarity
    stored_texts = load_database()["content"].tolist()
    if stored_texts:
        similarity_scores = text_model.predict([[text, stored] for stored in stored_texts])
        highest_similarity = float(max(similarity_scores))
    else:
        highest_similarity = 0
    save_to_database(text, highest_similarity * 100)
    return {"plagiarism_score": highest_similarity * 100}

def detect_ai(text: str):
    # Return the top label/score pair from the AI-text detector
    result = ai_detector(text)
    return result[0]
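The excerpt stops before any route definitions, so the two helpers are not yet exposed over HTTP. A minimal sketch of how they could be wired into the FastAPI app is shown below, assuming it is appended to the same module; the /check-text and /detect-ai paths and the TextIn request model are illustrative names, not part of the original Space.

from pydantic import BaseModel

class TextIn(BaseModel):
    text: str

@app.post("/check-text")
def check_text_endpoint(payload: TextIn):
    # Hypothetical route: plagiarism score of the submitted text against the stored corpus
    return check_text(payload.text)

@app.post("/detect-ai")
def detect_ai_endpoint(payload: TextIn):
    # Hypothetical route: label/score from the AI-generated-text detector
    return detect_ai(payload.text)

With the app served by uvicorn, a client would then POST JSON such as {"text": "..."} to either path and receive the corresponding score dictionary in the response.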