Update app.py
app.py CHANGED
@@ -1,19 +1,17 @@
 import os
 import gradio as gr
 import pandas as pd
-import numpy as np
 import faiss
 import torch
-from datetime import datetime
 from transformers import AutoTokenizer, AutoModel
-from openai import OpenAI
+from openai import OpenAI
+from huggingface_hub import login
 
-# ✅ Load …
-…
-client = OpenAI(api_key=OPENAI_API_KEY)
+# ✅ Load environment secrets securely
+login(token=os.getenv("HF_TOKEN"))  # Hugging Face login
+client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))  # OpenAI GPT-3.5 auth
 
-# ✅ Load FAISS index and product …
+# ✅ Load FAISS index and product descriptions
 index = faiss.read_index("deberta_faiss.index")
 text_data = pd.read_csv("deberta_text_data.csv")["Retrieved Text"].tolist()
 
@@ -22,75 +20,71 @@ deberta_model_name = "microsoft/deberta-v3-base"
 deberta_tokenizer = AutoTokenizer.from_pretrained(deberta_model_name)
 deberta_model = AutoModel.from_pretrained(deberta_model_name).to("cpu")
 
-# ✅ …
-def generate_embeddings(…
-    tokens = deberta_tokenizer(…
+# ✅ Embedding function
+def generate_embeddings(texts):
+    tokens = deberta_tokenizer(texts, return_tensors="pt", padding=True, truncation=True).to("cpu")
     with torch.no_grad():
         embeddings = deberta_model(**tokens).last_hidden_state.mean(dim=1).cpu().numpy().astype("float32")
     return embeddings
 
-# ✅ …
-def generate_response(…
-    # Embed …
-    query_embedding = generate_embeddings([…
+# ✅ RAG-based response using OpenAI
+def generate_response(message, history):
+    # Embed and retrieve context
+    query_embedding = generate_embeddings([message])
     faiss.normalize_L2(query_embedding)
-    …
+    _, indices = index.search(query_embedding, k=5)
     retrieved_docs = [text_data[i] for i in indices[0]]
-    context = "\n".join(set(retrieved_docs))
-    …
-    # …
-    …
-    """
-    …
-    messages…
-    …
+    context = "\n- " + "\n- ".join(set(retrieved_docs))
+
+    # Format history for OpenAI
+    chat_history = [{"role": "system", "content": "You're a helpful luxury interior design assistant."}]
+    for user, bot in history:
+        chat_history.append({"role": "user", "content": user})
+        chat_history.append({"role": "assistant", "content": bot})
+
+    # Append latest query
+    chat_history.append({
+        "role": "user",
+        "content": f"Here are related product descriptions:{context}\n\nUser Question: {message}\n\nAnswer:"
+    })
+
+    # Call OpenAI GPT-3.5
+    completion = client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=chat_history,
+        temperature=0.7,
+        max_tokens=500
+    )
+    response = completion.choices[0].message.content.strip()
+
+    # Generate a follow-up question
+    followup_prompt = f"Based on this interior decor answer: '{response}', suggest a helpful follow-up question the user might ask next."
+    followup_completion = client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": followup_prompt}],
+        temperature=0.7,
+        max_tokens=60
+    )
+    followup = followup_completion.choices[0].message.content.strip()
 
-    …
+    final_output = f"🪑 **Decor Response:** {response}\n\n🔁 **Suggested Follow-Up:** {followup}"
+    return final_output
 
 # ✅ Gradio Chat UI
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("## 🪑 Luxury Decor Assistant (RAG)")
-    gr.Markdown(
-        "💬 Ask your interior design questions using real product descriptions. "
-        "Powered by **DeBERTa + FAISS** – now upgraded to **OpenAI GPT-3.5** for enhanced answers."
-    )
+    gr.Markdown("💬 Ask your interior design questions using real product descriptions. Powered by **DeBERTa + FAISS + OpenAI GPT-3.5**.")
 
-    chatbot = gr.Chatbot(label="Chatbot"…
-    msg = gr.Textbox(label="Textbox", placeholder="e.g. Suggest cozy decor for …
+    chatbot = gr.Chatbot(label="Chatbot")
+    msg = gr.Textbox(label="Textbox", placeholder="e.g. Suggest cozy decor for a small bedroom", scale=8)
     clear = gr.Button("Clear")
 
-    def respond(message, …
-        …
-        history.append((message, response))
-        return history, ""
+    def respond(message, chat_history):
+        response = generate_response(message, chat_history)
+        chat_history.append((message, response))
+        return "", chat_history
 
-    msg.submit(respond, [msg, chatbot], […
-    clear.click(lambda: …
+    msg.submit(respond, [msg, chatbot], [msg, chatbot])
+    clear.click(lambda: None, None, chatbot, queue=False)
 
-# ✅ Launch the app
 demo.launch()
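For a quick sanity check of the retrieval half of this pipeline, the embedding and FAISS steps from the diff can be run standalone, stopping before the OpenAI call so no API key is needed. This is a hypothetical smoke test, not part of the commit; it assumes the committed `deberta_faiss.index` and `deberta_text_data.csv` are present, and the query string is made up.

```python
# retrieval_check.py — hypothetical smoke test, not part of this commit.
# Mirrors the embedding + FAISS retrieval steps from app.py above.
import faiss
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModel

index = faiss.read_index("deberta_faiss.index")
text_data = pd.read_csv("deberta_text_data.csv")["Retrieved Text"].tolist()

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
model = AutoModel.from_pretrained("microsoft/deberta-v3-base").to("cpu")

tokens = tokenizer(["cozy decor for a small bedroom"],
                   return_tensors="pt", padding=True, truncation=True)
with torch.no_grad():
    emb = model(**tokens).last_hidden_state.mean(dim=1).numpy().astype("float32")

# The index was presumably built with these same mean-pooled embeddings;
# a mismatch here means the wrong model or the wrong index file.
assert emb.shape[1] == index.d, f"embedding dim {emb.shape[1]} != index dim {index.d}"

faiss.normalize_L2(emb)
_, indices = index.search(emb, k=5)
for i in indices[0]:
    print("-", text_data[i][:100])
```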
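One caveat with the new `os.getenv` calls: if either Space secret is unset, `os.getenv` returns `None`, and `login(token=None)` or the first OpenAI request fails later with a less obvious error. A minimal fail-fast guard (hypothetical, not in the commit; the names `HF_TOKEN` and `OPENAI_API_KEY` come from the diff above) could sit just above those two lines:

```python
import os

# Hypothetical guard, not in the commit: fail at startup if either Space
# secret is missing, instead of letting login()/the first API call fail later.
missing = [name for name in ("HF_TOKEN", "OPENAI_API_KEY") if not os.getenv(name)]
if missing:
    raise RuntimeError(f"Missing Space secrets: {', '.join(missing)}")
```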