ajalisatgi committed
Commit 80ac8cc · verified · 1 Parent(s): cf43e0d

Update app.py

Files changed (1)
  1. app.py +50 -25
app.py CHANGED
@@ -1,45 +1,70 @@
 import gradio as gr
 import openai
+import os
 from langchain.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import Chroma
+from langchain.schema import Document
 
-# Set API Key
-openai.api_key = "sk-proj-..."
-
-# Load embedding model
+# ✅ Load the Sentence Transformer Embedding Model
 model_name = "intfloat/e5-small"
 embedding_model = HuggingFaceEmbeddings(model_name=model_name)
 
-# Load ChromaDB
+# ✅ Set up OpenAI API Key (Replace with your own API key)
+openai.api_key = os.getenv("OPENAI_API_KEY")  # Add in Hugging Face Secrets
+
+# ✅ Load ChromaDB with RunGalileo Dataset
 persist_directory = "./docs/chroma/"
 vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_model)
 
-# Define RAG function
+# ✅ Function to Retrieve Top-K Relevant Documents
+def retrieve_documents(question, k=5):
+    """Retrieve top K relevant documents from ChromaDB"""
+    docs = vectordb.similarity_search(question, k=k)
+    if not docs:
+        return ["No relevant documents found."]
+    return [doc.page_content for doc in docs]
+
+# ✅ Function to Generate AI Response
+def generate_response(question, context):
+    """Generate AI response using OpenAI GPT-4"""
+    if not context or "No relevant documents found." in context:
+        return "No relevant context available. Try a different query."
+
+    full_prompt = f"Context: {context}\n\nQuestion: {question}"
+
+    try:
+        response = openai.ChatCompletion.create(
+            model="gpt-4",
+            messages=[
+                {"role": "system", "content": "You are an AI assistant that answers user queries based on the given context."},
+                {"role": "user", "content": full_prompt}
+            ],
+            max_tokens=300,
+            temperature=0.7
+        )
+        return response["choices"][0]["message"]["content"].strip()
+    except Exception as e:
+        return f"Error generating response: {str(e)}"
+
+# ✅ Full RAG Pipeline
 def rag_pipeline(question):
-    """Retrieve relevant documents and generate AI response"""
-    retrieved_docs = vectordb.similarity_search(question, k=5)
-    context = " ".join([doc.page_content for doc in retrieved_docs])
-
-    # Generate AI response
-    full_prompt = f"Context: {context}\n\nQuestion: {question}"
-    response = openai.ChatCompletion.create(
-        model="gpt-4",
-        messages=[{"role": "user", "content": full_prompt}],
-        max_tokens=300,
-        temperature=0.7
-    )
-
-    return response['choices'][0]['message']['content'].strip(), retrieved_docs
-
-# Gradio UI
+    retrieved_docs = retrieve_documents(question, k=5)
+    context = " ".join(retrieved_docs)
+    response = generate_response(question, context)
+    return response, "\n\n".join(retrieved_docs)
+
+# ✅ Gradio UI Interface
 iface = gr.Interface(
     fn=rag_pipeline,
     inputs=gr.Textbox(label="Enter your question"),
-    outputs=[gr.Textbox(label="Generated Response"), gr.Textbox(label="Retrieved Documents")],
-    title="RAG-Based Question Answering System",
+    outputs=[
+        gr.Textbox(label="Generated Response"),
+        gr.Textbox(label="Retrieved Documents")
+    ],
+    title="RAG-Based Question Answering System for RunGalileo",
     description="Enter a question and retrieve relevant documents with AI-generated response."
 )
 
-# Launch Gradio app
+# ✅ Launch the Gradio App
 if __name__ == "__main__":
     iface.launch()
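
Note for reviewers: the app reads a pre-built Chroma index from `./docs/chroma/`, but nothing in this commit creates that directory. Below is a minimal indexing sketch that would populate it with the same `intfloat/e5-small` embeddings the app loads; the script name and sample documents are placeholders, not the actual RunGalileo dataset. This is also a plausible use for the `from langchain.schema import Document` import added above.

```python
# build_index.py (hypothetical helper): run once before starting the app.
# The sample texts below are placeholders, not the real dataset.
from langchain.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.schema import Document

embedding_model = HuggingFaceEmbeddings(model_name="intfloat/e5-small")

docs = [
    Document(page_content="Placeholder passage one.", metadata={"source": "doc-1"}),
    Document(page_content="Placeholder passage two.", metadata={"source": "doc-2"}),
]

# Build and persist the index in the directory the app reads from.
vectordb = Chroma.from_documents(
    documents=docs,
    embedding=embedding_model,
    persist_directory="./docs/chroma/",
)
vectordb.persist()  # older Chroma versions need this; 0.4+ persists automatically
```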
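With the index in place and `OPENAI_API_KEY` set (locally via the shell, on Spaces via repository Secrets), the pipeline can be smoke-tested without the Gradio UI; the question below is only an example:

```python
# Quick check that retrieval and generation wire together.
answer, sources = rag_pipeline("What is this documentation about?")
print("Answer:", answer)
print("Retrieved documents:", sources)
```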
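One compatibility caveat: `openai.ChatCompletion.create` is the pre-1.0 interface of the `openai` Python SDK and raises an error under `openai>=1.0`. If the Space's `requirements.txt` pins a 1.x SDK (the pinned version is not shown in this commit, so this is an assumption), the call would need the client-based form, roughly:

```python
# Sketch of the same GPT-4 request against the openai>=1.0 client API.
import os
from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

def generate_response_v1(question, context):
    """Port of generate_response to the 1.x SDK (hypothetical helper name)."""
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": "You are an AI assistant that answers user queries based on the given context."},
            {"role": "user", "content": f"Context: {context}\n\nQuestion: {question}"},
        ],
        max_tokens=300,
        temperature=0.7,
    )
    return response.choices[0].message.content.strip()
```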