from langchain_core.prompts import ChatPromptTemplate


def make_standalone_question_chain(llm):
    """Build an LCEL chain that rewrites a follow-up question as a standalone one."""
    prompt = ChatPromptTemplate.from_messages([
        ("system", """You are a helpful assistant that transforms user questions into standalone questions
by incorporating context from the chat history if needed. The output should be a self-contained
question that can be understood without any additional context.

Examples:
Chat History: "Let's talk about renewable energy"
User Input: "What about solar?"
Output: "What are the key aspects of solar energy as a renewable energy source?"

Chat History: "What causes global warming?"
User Input: "And what are its effects?"
Output: "What are the effects of global warming on the environment and society?"
"""),
        ("user", """Chat History: {chat_history}
User Question: {question}

Transform this into a standalone question:
Make sure to keep the original language of the question."""),
    ])

    # Pipe the prompt into the model; invoking the chain returns a chat message.
    chain = prompt | llm
    return chain


def make_standalone_question_node(llm):
    """Wrap the chain as a graph node that rewrites the "user_input" state key."""
    standalone_chain = make_standalone_question_chain(llm)

    def transform_to_standalone(state):
        chat_history = state.get("chat_history", "")
        # On the first turn there is no history to fold in, so skip the LLM call.
        if chat_history == "":
            return {}
        output = standalone_chain.invoke({
            "chat_history": chat_history,
            "question": state["user_input"],
        })
        # Return a partial state update instead of mutating the incoming state;
        # the framework merges it back into the graph state.
        return {"user_input": output.content}

    return transform_to_standalone
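

# Minimal usage sketch. Assumptions: the graph state carries "user_input" and
# "chat_history" string keys, and langgraph plus langchain-openai are
# installed; ChatOpenAI with "gpt-4o-mini" is an illustrative stand-in for
# whichever chat model the caller actually uses.
if __name__ == "__main__":
    from typing import TypedDict

    from langchain_openai import ChatOpenAI
    from langgraph.graph import StateGraph, START, END

    class ChatState(TypedDict):
        user_input: str
        chat_history: str

    llm = ChatOpenAI(model="gpt-4o-mini")  # assumed model choice

    # Wire the node into a one-step graph: condense the question, then finish.
    graph = StateGraph(ChatState)
    graph.add_node("standalone_question", make_standalone_question_node(llm))
    graph.add_edge(START, "standalone_question")
    graph.add_edge("standalone_question", END)
    app = graph.compile()

    result = app.invoke({
        "user_input": "And what are its effects?",
        "chat_history": "What causes global warming?",
    })
    print(result["user_input"])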