dine24 committed on
Commit
2afe455
Β·
verified Β·
1 Parent(s): 0a775ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -61
app.py CHANGED
@@ -1,91 +1,96 @@
1
  import os
2
  import gradio as gr
3
  import pandas as pd
 
4
  import faiss
5
  import torch
6
- from transformers import AutoTokenizer, AutoModel
7
- from openai import OpenAI
8
  from datetime import datetime
 
 
9
 
10
- # βœ… Hugging Face secret & OpenAI token setup
11
  HF_TOKEN = os.getenv("HF_TOKEN")
12
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
13
  client = OpenAI(api_key=OPENAI_API_KEY)
14
 
15
- # βœ… Load DeBERTa model for embeddings
 
 
 
 
16
  deberta_model_name = "microsoft/deberta-v3-base"
17
  deberta_tokenizer = AutoTokenizer.from_pretrained(deberta_model_name)
18
  deberta_model = AutoModel.from_pretrained(deberta_model_name).to("cpu")
19
 
20
- # βœ… Load FAISS index + data
21
- index = faiss.read_index("deberta_faiss.index")
22
- text_data = pd.read_csv("deberta_text_data.csv")["Retrieved Text"].tolist()
23
-
24
- # βœ… Embedding generator
25
  def generate_embeddings(queries):
26
- inputs = deberta_tokenizer(queries, return_tensors="pt", padding=True, truncation=True).to("cpu")
27
  with torch.no_grad():
28
- embeddings = deberta_model(**inputs).last_hidden_state.mean(dim=1).cpu().numpy().astype("float32")
29
  return embeddings
30
 
31
- # βœ… Query + context β†’ prompt for GPT-3.5
32
- def generate_response(user_query, history):
33
- # Embed the query and retrieve top documents
34
- query_embedding = generate_embeddings([user_query])
35
  faiss.normalize_L2(query_embedding)
36
  distances, indices = index.search(query_embedding, k=5)
37
- context_docs = [text_data[i] for i in indices[0]]
38
- context = "\n".join(context_docs)
39
-
40
- # Construct the chat history for OpenAI
41
- messages = [{"role": "system", "content": "You are a helpful interior decor assistant. Use luxury home decor examples and retrieved documents to answer queries."}]
42
- for user_msg, bot_reply in history:
43
- messages.append({"role": "user", "content": user_msg})
44
- messages.append({"role": "assistant", "content": bot_reply})
45
-
46
- # Add the latest user query
47
- messages.append({
48
- "role": "user",
49
- "content": f"Context:\n{context}\n\nUser Query:\n{user_query}\n\nGive a helpful, elegant, practical luxury decor suggestion."
50
- })
51
-
52
- # Call OpenAI GPT-3.5
53
- response = client.chat.completions.create(
54
- model="gpt-3.5-turbo",
55
- messages=messages,
56
- max_tokens=512,
57
- temperature=0.7
58
- )
59
-
60
- decor_response = response.choices[0].message.content.strip()
61
-
62
- # Follow-up question prompt
63
- followup_prompt = f"Given the user's query: '{user_query}' and the answer: '{decor_response}', suggest a relevant follow-up decor question."
64
- followup = client.chat.completions.create(
65
- model="gpt-3.5-turbo",
66
- messages=[{"role": "user", "content": followup_prompt}],
67
- max_tokens=60
68
- ).choices[0].message.content.strip()
69
-
70
- full_reply = f"πŸͺ‘ **Decor Response:** {decor_response}\n\nπŸ”„ **Suggested Follow-Up:** {followup}"
71
- return full_reply
72
-
73
- # βœ… Gradio chatbot interface
74
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
75
- gr.Markdown("### πŸ›‹οΈ **Luxury Decor Assistant (RAG)**\nAsk your interior design questions using real product descriptions. Powered by **DeBERTa + FAISS + Flan-T5** β†’ now upgraded to **OpenAI GPT-3.5** for enhanced answers.")
 
 
 
 
76
 
77
- chatbot = gr.Chatbot(label="Chatbot", show_label=True)
78
- user_input = gr.Textbox(label="Textbox", placeholder="e.g. Suggest cozy decor for Neha Study room", show_label=True)
79
- clear_btn = gr.Button("Clear")
80
 
81
  def respond(message, history):
82
  if history is None:
83
  history = []
84
- reply = generate_response(message, history)
85
- history.append((message, reply))
86
- return history, history
87
 
88
- user_input.submit(respond, [user_input, chatbot], [chatbot, chatbot])
89
- clear_btn.click(lambda: ([], ""), None, [chatbot, user_input])
90
 
 
91
  demo.launch()
 
1
  import os
2
  import gradio as gr
3
  import pandas as pd
4
+ import numpy as np
5
  import faiss
6
  import torch
 
 
7
  from datetime import datetime
8
+ from transformers import AutoTokenizer, AutoModel
9
+ from openai import OpenAI, OpenAIError
10
 
11
+ # βœ… Load Hugging Face and OpenAI credentials
12
  HF_TOKEN = os.getenv("HF_TOKEN")
13
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
14
  client = OpenAI(api_key=OPENAI_API_KEY)
15
 
16
+ # βœ… Load FAISS index and product data
17
+ index = faiss.read_index("deberta_faiss.index")
18
+ text_data = pd.read_csv("deberta_text_data.csv")["Retrieved Text"].tolist()
19
+
20
+ # βœ… Load DeBERTa for embedding
21
  deberta_model_name = "microsoft/deberta-v3-base"
22
  deberta_tokenizer = AutoTokenizer.from_pretrained(deberta_model_name)
23
  deberta_model = AutoModel.from_pretrained(deberta_model_name).to("cpu")
24
 
25
+ # βœ… Helper: Generate embedding from DeBERTa
 
 
 
 
26
def generate_embeddings(queries):
    """Encode a batch of query strings into mean-pooled DeBERTa embeddings.

    Args:
        queries: list of raw query strings.

    Returns:
        A float32 numpy array of shape (len(queries), hidden_dim), the
        per-sequence mean of the last hidden states — float32 because that
        is what FAISS expects.
    """
    encoded = deberta_tokenizer(
        queries, return_tensors="pt", padding=True, truncation=True
    ).to("cpu")
    # Inference only — no gradients needed.
    with torch.no_grad():
        hidden = deberta_model(**encoded).last_hidden_state
    return hidden.mean(dim=1).cpu().numpy().astype("float32")
31
 
32
+ # βœ… Main logic: Compose RAG prompt and get GPT-3.5 response
33
# βœ… Main logic: Compose RAG prompt and get GPT-3.5 response
def generate_response(user_message, chat_history):
    """Answer a decor question via RAG: retrieve context with FAISS, then ask GPT-3.5.

    Args:
        user_message: the latest user query string.
        chat_history: prior (user, assistant) message pairs, replayed so the
            model keeps conversational context.

    Returns:
        The assistant's reply string, or an error message string if the
        OpenAI call fails (UI stays responsive instead of crashing).
    """
    # Embed the query and retrieve the top-5 most similar product descriptions.
    query_embedding = generate_embeddings([user_message])
    faiss.normalize_L2(query_embedding)
    distances, indices = index.search(query_embedding, k=5)

    # FAISS pads with -1 when the index holds fewer than k vectors; skip
    # those (otherwise text_data[-1] is silently used). Deduplicate while
    # PRESERVING rank order — the previous set() call scrambled the order
    # nondeterministically between runs.
    retrieved_docs = [text_data[i] for i in indices[0] if i >= 0]
    context = "\n".join(dict.fromkeys(retrieved_docs))

    # Build system prompt with context
    system_prompt = f"""
You are a luxury home decor assistant. Use the product context below to answer questions about home interiors.

Product Descriptions:
{context}

Always provide helpful, elegant, and context-aware interior design suggestions.
Also suggest a follow-up question based on the user query to keep the conversation going.
"""

    messages = [{"role": "system", "content": system_prompt}]

    # Replay prior turns so GPT-3.5 sees the whole conversation.
    for user_turn, assistant_turn in chat_history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": user_message})

    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.7,
            max_tokens=512,
        )
        reply = response.choices[0].message.content.strip()
    except OpenAIError as e:
        # Surface API failures in-chat rather than raising into Gradio.
        reply = f"❌ Error: {str(e)}"

    return reply
72
+
73
+ # βœ… Gradio Chat UI
 
74
# βœ… Gradio Chat UI
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## πŸͺ‘ Luxury Decor Assistant (RAG)")
    gr.Markdown(
        "πŸ’¬ Ask your interior design questions using real product descriptions. "
        "Powered by **DeBERTa + FAISS** β†’ now upgraded to **OpenAI GPT-3.5** for enhanced answers."
    )

    chatbot = gr.Chatbot(label="Chatbot", height=400)
    msg = gr.Textbox(label="Textbox", placeholder="e.g. Suggest cozy decor for Neha Study room")
    clear = gr.Button("Clear")

    def respond(message, history):
        """Append (user, assistant) pair to the history and clear the textbox."""
        if history is None:
            history = []
        history.append((message, generate_response(message, history)))
        # Second output ("") resets the input textbox.
        return history, ""

    msg.submit(respond, [msg, chatbot], [chatbot, msg])
    clear.click(lambda: ([], ""), None, [chatbot, msg])

# βœ… Launch the app
demo.launch()