dine24 commited on
Commit
e58330f
Β·
verified Β·
1 Parent(s): 5b2daa2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -63
app.py CHANGED
@@ -1,19 +1,17 @@
1
  import os
2
  import gradio as gr
3
  import pandas as pd
4
- import numpy as np
5
  import faiss
6
  import torch
7
- from datetime import datetime
8
  from transformers import AutoTokenizer, AutoModel
9
- from openai import OpenAI, OpenAIError
 
10
 
11
- # βœ… Load Hugging Face and OpenAI credentials
12
- HF_TOKEN = os.getenv("HF_TOKEN")
13
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
14
- client = OpenAI(api_key=OPENAI_API_KEY)
15
 
16
- # βœ… Load FAISS index and product data
17
  index = faiss.read_index("deberta_faiss.index")
18
  text_data = pd.read_csv("deberta_text_data.csv")["Retrieved Text"].tolist()
19
 
@@ -22,75 +20,71 @@ deberta_model_name = "microsoft/deberta-v3-base"
22
  deberta_tokenizer = AutoTokenizer.from_pretrained(deberta_model_name)
23
  deberta_model = AutoModel.from_pretrained(deberta_model_name).to("cpu")
24
 
25
- # βœ… Helper: Generate embedding from DeBERTa
26
- def generate_embeddings(queries):
27
- tokens = deberta_tokenizer(queries, return_tensors="pt", padding=True, truncation=True).to("cpu")
28
  with torch.no_grad():
29
  embeddings = deberta_model(**tokens).last_hidden_state.mean(dim=1).cpu().numpy().astype("float32")
30
  return embeddings
31
 
32
- # βœ… Main logic: Compose RAG prompt and get GPT-3.5 response
33
- def generate_response(user_message, chat_history):
34
- # Embed the query and retrieve docs
35
- query_embedding = generate_embeddings([user_message])
36
  faiss.normalize_L2(query_embedding)
37
- distances, indices = index.search(query_embedding, k=5)
38
  retrieved_docs = [text_data[i] for i in indices[0]]
39
- context = "\n".join(set(retrieved_docs))
40
-
41
- # Build system prompt with context
42
- system_prompt = f"""
43
- You are a luxury home decor assistant. Use the product context below to answer questions about home interiors.
44
-
45
- Product Descriptions:
46
- {context}
47
-
48
- Always provide helpful, elegant, and context-aware interior design suggestions.
49
- Also suggest a follow-up question based on the user query to keep the conversation going.
50
- """
51
-
52
- messages = [{"role": "system", "content": system_prompt}]
53
-
54
- for message in chat_history:
55
- messages.append({"role": "user", "content": message[0]})
56
- messages.append({"role": "assistant", "content": message[1]})
57
-
58
- messages.append({"role": "user", "content": user_message})
59
-
60
- try:
61
- response = client.chat.completions.create(
62
- model="gpt-3.5-turbo",
63
- messages=messages,
64
- temperature=0.7,
65
- max_tokens=512
66
- )
67
- reply = response.choices[0].message.content.strip()
68
- except OpenAIError as e:
69
- reply = f"❌ Error: {str(e)}"
 
70
 
71
- return reply
 
72
 
73
  # βœ… Gradio Chat UI
74
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
75
  gr.Markdown("## πŸͺ‘ Luxury Decor Assistant (RAG)")
76
- gr.Markdown(
77
- "πŸ’¬ Ask your interior design questions using real product descriptions. "
78
- "Powered by **DeBERTa + FAISS** β†’ now upgraded to **OpenAI GPT-3.5** for enhanced answers."
79
- )
80
 
81
- chatbot = gr.Chatbot(label="Chatbot", height=400)
82
- msg = gr.Textbox(label="Textbox", placeholder="e.g. Suggest cozy decor for Neha Study room")
83
  clear = gr.Button("Clear")
84
 
85
- def respond(message, history):
86
- if history is None:
87
- history = []
88
- response = generate_response(message, history)
89
- history.append((message, response))
90
- return history, ""
91
 
92
- msg.submit(respond, [msg, chatbot], [chatbot, msg])
93
- clear.click(lambda: ([], ""), None, [chatbot, msg])
94
 
95
- # βœ… Launch the app
96
  demo.launch()
 
1
  import os
2
  import gradio as gr
3
  import pandas as pd
 
4
  import faiss
5
  import torch
 
6
  from transformers import AutoTokenizer, AutoModel
7
+ from openai import OpenAI
8
+ from huggingface_hub import login
9
 
10
+ # βœ… Load environment secrets securely
11
+ login(token=os.getenv("HF_TOKEN")) # Hugging Face login
12
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) # OpenAI GPT-3.5 auth
 
13
 
14
# βœ… Retrieval assets: the prebuilt FAISS index and the product texts it indexes.
index = faiss.read_index("deberta_faiss.index")
text_data = pd.read_csv("deberta_text_data.csv")["Retrieved Text"].tolist()

# CPU-hosted DeBERTa encoder used to embed queries into the index's vector space.
deberta_tokenizer = AutoTokenizer.from_pretrained(deberta_model_name)
deberta_model = AutoModel.from_pretrained(deberta_model_name).to("cpu")
22
 
23
# βœ… Embedding function
def generate_embeddings(texts):
    """Encode *texts* with DeBERTa and return mean-pooled float32 embeddings.

    Returns a numpy array of shape (len(texts), hidden_size) ready for
    FAISS search (FAISS requires float32).
    """
    encoded = deberta_tokenizer(
        texts, return_tensors="pt", padding=True, truncation=True
    ).to("cpu")
    with torch.no_grad():
        hidden = deberta_model(**encoded).last_hidden_state
    # Mean-pool across the token axis, then hand numpy a float32 copy.
    return hidden.mean(dim=1).cpu().numpy().astype("float32")
29
 
30
# βœ… RAG-based response using OpenAI
def generate_response(message, history):
    """Answer *message* using FAISS-retrieved product context and GPT-3.5.

    Parameters
    ----------
    message : str
        The user's latest question.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns as stored by the Gradio chatbot.

    Returns
    -------
    str
        Markdown answer plus a suggested follow-up question, or an
        "❌ Error: ..." string if the OpenAI API call fails.
    """
    # Local import: the top of the file only imports OpenAI, not the error type.
    from openai import OpenAIError

    # Embed the query and retrieve the 5 nearest product descriptions.
    query_embedding = generate_embeddings([message])
    faiss.normalize_L2(query_embedding)
    _, indices = index.search(query_embedding, k=5)
    retrieved_docs = [text_data[i] for i in indices[0]]
    # Dedupe while preserving retrieval order — set() would make the prompt
    # context (and thus the model's answer) nondeterministic across runs.
    context = "\n- " + "\n- ".join(dict.fromkeys(retrieved_docs))

    # Rebuild the conversation in OpenAI chat-message format.
    chat_history = [{"role": "system", "content": "You're a helpful luxury interior design assistant."}]
    for user, bot in history:
        chat_history.append({"role": "user", "content": user})
        chat_history.append({"role": "assistant", "content": bot})

    # Append the latest query together with the retrieved context.
    chat_history.append({
        "role": "user",
        "content": f"Here are related product descriptions:{context}\n\nUser Question: {message}\n\nAnswer:"
    })

    try:
        # Main answer from GPT-3.5.
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=chat_history,
            temperature=0.7,
            max_tokens=500
        )
        response = completion.choices[0].message.content.strip()

        # Second, short call to propose a follow-up question.
        followup_prompt = f"Based on this interior decor answer: '{response}', suggest a helpful follow-up question the user might ask next."
        followup_completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": followup_prompt}],
            temperature=0.7,
            max_tokens=60
        )
        followup = followup_completion.choices[0].message.content.strip()
    except OpenAIError as e:
        # Surface API failures in the chat instead of crashing the UI
        # (restores the error handling the previous revision had).
        return f"❌ Error: {str(e)}"

    final_output = f"πŸͺ‘ **Decor Response:** {response}\n\nπŸ”„ **Suggested Follow-Up:** {followup}"
    return final_output
72
 
73
# βœ… Gradio Chat UI
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## πŸͺ‘ Luxury Decor Assistant (RAG)")
    gr.Markdown("πŸ’¬ Ask your interior design questions using real product descriptions. Powered by **DeBERTa + FAISS + OpenAI GPT-3.5**.")

    chatbot = gr.Chatbot(label="Chatbot")
    msg = gr.Textbox(label="Textbox", placeholder="e.g. Suggest cozy decor for a small bedroom", scale=8)
    clear = gr.Button("Clear")

    def respond(message, chat_history):
        """Handle one chat turn: generate a reply and append it to the history."""
        # Gradio passes None before the first turn; guard so .append is safe
        # (restores the None check the previous revision had).
        if chat_history is None:
            chat_history = []
        response = generate_response(message, chat_history)
        chat_history.append((message, response))
        # First output clears the textbox; second refreshes the chatbot.
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    # Reset both the conversation and any text left in the input box;
    # clearing only the chatbot left stale text in the textbox.
    clear.click(lambda: ([], ""), None, [chatbot, msg], queue=False)

# βœ… Launch the app
demo.launch()