JUNGU committed on
Commit
413ca2d
·
verified ·
1 Parent(s): 14bbe59

Update rag_system.py

Browse files
Files changed (1) hide show
  1. rag_system.py +23 -2
rag_system.py CHANGED
@@ -12,6 +12,7 @@ from langchain.retrievers import ContextualCompressionRetriever
12
  from langchain.retrievers.document_compressors import LLMChainExtractor
13
  from langgraph.graph import Graph
14
  from langchain_core.runnables import RunnablePassthrough, RunnableLambda
 
15
 
16
  # Load environment variables
17
  load_dotenv()
@@ -41,11 +42,31 @@ def load_retrieval_qa_chain():
41
  base_retriever=vectorstore.as_retriever()
42
  )
43
 
44
- # Create ConversationalRetrievalChain with the new retriever
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  qa_chain = ConversationalRetrievalChain.from_llm(
46
  llm,
47
  retriever=compression_retriever,
48
- return_source_documents=True
 
49
  )
50
 
51
  return qa_chain
 
12
  from langchain.retrievers.document_compressors import LLMChainExtractor
13
  from langgraph.graph import Graph
14
  from langchain_core.runnables import RunnablePassthrough, RunnableLambda
15
+ from langchain.prompts import PromptTemplate
16
 
17
  # Load environment variables
18
  load_dotenv()
 
42
  base_retriever=vectorstore.as_retriever()
43
  )
44
 
45
+ # Define your instruction/prompt
46
+ instruction = """당신은 RAG(Retrieval-Augmented Generation) 기반 AI 어시스턴트입니다. 다음 지침을 따라 사용자 질문에 답하세요:
47
+
48
+ 1. 검색 결과 활용: 제공된 검색 결과를 분석하고 관련 정보를 사용해 답변하세요.
49
+ 2. 정확성 유지: 정보의 정확성을 확인하고, 불확실한 경우 이를 명시하세요.
50
+ 3. 간결한 응답: 질문에 직접 답하고 핵심 내용에 집중하세요.
51
+ 4. 추가 정보 제안: 관련된 추가 정보가 있다면 언급하세요.
52
+ 5. 윤리성 고려: 객관적이고 중립적인 태도를 유지하세요.
53
+ 6. 한계 인정: 답변할 수 없는 경우 솔직히 인정하세요.
54
+ 7. 대화 유지: 자연스럽게 대화를 이어가고, 필요시 후속 질문을 제안하세요.
55
+
56
+ 항상 정확하고 유용한 정보를 제공하는 것을 목표로 하세요."""
57
+
58
+ # Create a prompt template
59
+ prompt_template = PromptTemplate(
60
+ input_variables=["context", "question"],
61
+ template=instruction + "\n\nContext: {context}\n\nQuestion: {question}\n\nAnswer:"
62
+ )
63
+
64
+ # Create ConversationalRetrievalChain with the new retriever and prompt
65
  qa_chain = ConversationalRetrievalChain.from_llm(
66
  llm,
67
  retriever=compression_retriever,
68
+ return_source_documents=True,
69
+ combine_docs_chain_kwargs={"prompt": prompt_template}
70
  )
71
 
72
  return qa_chain