Merge branch 'dev' of https://bitbucket.org/ekimetrics/climate_qa into dev
- app.py +52 -121
- climateqa/chat.py +11 -3
- climateqa/engine/chains/answer_rag.py +4 -2
- climateqa/engine/chains/follow_up.py +32 -0
- climateqa/engine/chains/standalone_question.py +39 -0
- climateqa/engine/graph.py +36 -10
- front/tabs/__init__.py +4 -1
- front/tabs/chat_interface.py +5 -2
- front/tabs/main_tab.py +59 -27
- front/tabs/tab_config.py +17 -27
- style.css +10 -19
app.py
CHANGED

@@ -15,7 +15,8 @@ from climateqa.chat import start_chat, chat_stream, finish_chat
 from climateqa.engine.talk_to_data.main import ask_vanna
 from climateqa.engine.talk_to_data.myVanna import MyVanna
 
-from front.tabs import (create_config_modal,
+from front.tabs import (create_config_modal, cqa_tab, create_about_tab)
+from front.tabs import (MainTabPanel, ConfigPanel)
 from front.utils import process_figures
 from gradio_modal import Modal
 
@@ -136,81 +137,14 @@ def create_drias_tab():
 
    vanna_display = gr.Plot()
    vanna_direct_question.submit(ask_vanna_query, [vanna_direct_question], [vanna_sql_query ,vanna_table, vanna_display])
-
-# # UI Layout Components
-def cqa_tab(tab_name):
-    # State variables
-    current_graphs = gr.State([])
-    with gr.Tab(tab_name):
-        with gr.Row(elem_id="chatbot-row"):
-            # Left column - Chat interface
-            with gr.Column(scale=2):
-                chatbot, textbox, config_button = create_chat_interface(tab_name)
-
-            # Right column - Content panels
-            with gr.Column(scale=2, variant="panel", elem_id="right-panel"):
-                with gr.Tabs(elem_id="right_panel_tab") as tabs:
-                    # Examples tab
-                    with gr.TabItem("Examples", elem_id="tab-examples", id=0):
-                        examples_hidden = create_examples_tab(tab_name)
-
-                    # Sources tab
-                    with gr.Tab("Sources", elem_id="tab-sources", id=1) as tab_sources:
-                        sources_textbox = gr.HTML(show_label=False, elem_id="sources-textbox")
-
-
-                    # Recommended content tab
-                    with gr.Tab("Recommended content", elem_id="tab-recommended_content", id=2) as tab_recommended_content:
-                        with gr.Tabs(elem_id="group-subtabs") as tabs_recommended_content:
-                            # Figures subtab
-                            with gr.Tab("Figures", elem_id="tab-figures", id=3) as tab_figures:
-                                sources_raw, new_figures, used_figures, gallery_component, figures_cards, figure_modal = create_figures_tab()
-
-                            # Papers subtab
-                            with gr.Tab("Papers", elem_id="tab-citations", id=4) as tab_papers:
-                                papers_direct_search, papers_summary, papers_html, citations_network, papers_modal = create_papers_tab()
-
-                            # Graphs subtab
-                            with gr.Tab("Graphs", elem_id="tab-graphs", id=5) as tab_graphs:
-                                graphs_container = gr.HTML(
-                                    "<h2>There are no graphs to be displayed at the moment. Try asking another question.</h2>",
-                                    elem_id="graphs-container"
-                                )
-
-    return {
-        "chatbot": chatbot,
-        "textbox": textbox,
-        "tabs": tabs,
-        "sources_raw": sources_raw,
-        "new_figures": new_figures,
-        "current_graphs": current_graphs,
-        "examples_hidden": examples_hidden,
-        "sources_textbox": sources_textbox,
-        "figures_cards": figures_cards,
-        "gallery_component": gallery_component,
-        "config_button": config_button,
-        "papers_direct_search" : papers_direct_search,
-        "papers_html": papers_html,
-        "citations_network": citations_network,
-        "papers_summary": papers_summary,
-        "tab_recommended_content": tab_recommended_content,
-        "tab_sources": tab_sources,
-        "tab_figures": tab_figures,
-        "tab_graphs": tab_graphs,
-        "tab_papers": tab_papers,
-        "graph_container": graphs_container,
-        # "vanna_sql_query": vanna_sql_query,
-        # "vanna_table" : vanna_table,
-        # "vanna_display": vanna_display
-    }
+
 
-def config_event_handling(main_tabs_components : list[dict], config_componenets):
-    config_open = config_componenets["config_open"]
-    config_modal = config_componenets["config_modal"]
-    close_config_modal = config_componenets["close_config_modal_button"]
+def config_event_handling(main_tabs_components : list[MainTabPanel], config_componenets : ConfigPanel):
+    config_open = config_componenets.config_open
+    config_modal = config_componenets.config_modal
+    close_config_modal = config_componenets.close_config_modal_button
 
-    for button in [close_config_modal] + [main_tab_component["config_button"] for main_tab_component in main_tabs_components]:
+    for button in [close_config_modal] + [main_tab_component.config_button for main_tab_component in main_tabs_components]:
        button.click(
            fn=update_config_modal_visibility,
            inputs=[config_open],
@@ -218,58 +152,45 @@ def config_event_handling(main_tabs_components : list[dict], config_componenets
        )
 
 def event_handling(
-    main_tab_components,
-    config_components,
+    main_tab_components : MainTabPanel,
+    config_components : ConfigPanel,
    tab_name="ClimateQ&A"
 ):
-    chatbot = main_tab_components["chatbot"]
-    textbox = main_tab_components["textbox"]
-    tabs = main_tab_components["tabs"]
-    sources_raw = main_tab_components["sources_raw"]
-    new_figures = main_tab_components["new_figures"]
-    current_graphs = main_tab_components["current_graphs"]
-    examples_hidden = main_tab_components["examples_hidden"]
-    sources_textbox = main_tab_components["sources_textbox"]
-    figures_cards = main_tab_components["figures_cards"]
-    gallery_component = main_tab_components["gallery_component"]
-    papers_direct_search = main_tab_components["papers_direct_search"]
-    papers_html = main_tab_components["papers_html"]
-    citations_network = main_tab_components["citations_network"]
-    papers_summary = main_tab_components["papers_summary"]
-    tab_recommended_content = main_tab_components["tab_recommended_content"]
-    tab_sources = main_tab_components["tab_sources"]
-    tab_figures = main_tab_components["tab_figures"]
-    tab_graphs = main_tab_components["tab_graphs"]
-    tab_papers = main_tab_components["tab_papers"]
-    graphs_container = main_tab_components["graph_container"]
-    # vanna_table = main_tab_components["vanna_table"]
-    # vanna_display = main_tab_components["vanna_display"]
+    chatbot = main_tab_components.chatbot
+    textbox = main_tab_components.textbox
+    tabs = main_tab_components.tabs
+    sources_raw = main_tab_components.sources_raw
+    new_figures = main_tab_components.new_figures
+    current_graphs = main_tab_components.current_graphs
+    examples_hidden = main_tab_components.examples_hidden
+    sources_textbox = main_tab_components.sources_textbox
+    figures_cards = main_tab_components.figures_cards
+    gallery_component = main_tab_components.gallery_component
+    papers_direct_search = main_tab_components.papers_direct_search
+    papers_html = main_tab_components.papers_html
+    citations_network = main_tab_components.citations_network
+    papers_summary = main_tab_components.papers_summary
+    tab_recommended_content = main_tab_components.tab_recommended_content
+    tab_sources = main_tab_components.tab_sources
+    tab_figures = main_tab_components.tab_figures
+    tab_graphs = main_tab_components.tab_graphs
+    tab_papers = main_tab_components.tab_papers
+    graphs_container = main_tab_components.graph_container
+    follow_up_examples = main_tab_components.follow_up_examples
+    follow_up_examples_hidden = main_tab_components.follow_up_examples_hidden
 
-    dropdown_sources = config_components["dropdown_sources"]
-    dropdown_reports = config_components["dropdown_reports"]
-    dropdown_external_sources = config_components["dropdown_external_sources"]
-    search_only = config_components["search_only"]
-    dropdown_audience = config_components["dropdown_audience"]
-    after = config_components["after"]
-    output_query = config_components["output_query"]
-    output_language = config_components["output_language"]
-    # close_config_modal = config_components["close_config_modal_button"]
+    dropdown_sources = config_components.dropdown_sources
+    dropdown_reports = config_components.dropdown_reports
+    dropdown_external_sources = config_components.dropdown_external_sources
+    search_only = config_components.search_only
+    dropdown_audience = config_components.dropdown_audience
+    after = config_components.after
+    output_query = config_components.output_query
+    output_language = config_components.output_language
 
    new_sources_hmtl = gr.State([])
    ttd_data = gr.State([])
 
-    # for button in [config_button, close_config_modal]:
-    #     button.click(
-    #         fn=update_config_modal_visibility,
-    #         inputs=[config_open],
-    #         outputs=[config_modal, config_open]
-    #     )
 
    if tab_name == "ClimateQ&A":
        print("chat cqa - message sent")
@@ -277,15 +198,20 @@ def event_handling(
        # Event for textbox
        (textbox
            .submit(start_chat, [textbox, chatbot, search_only], [textbox, tabs, chatbot, sources_raw], queue=False, api_name=f"start_chat_{textbox.elem_id}")
-            .then(chat, [textbox, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs], concurrency_limit=8, api_name=f"chat_{textbox.elem_id}")
+            .then(chat, [textbox, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs, follow_up_examples.dataset], concurrency_limit=8, api_name=f"chat_{textbox.elem_id}")
            .then(finish_chat, None, [textbox], api_name=f"finish_chat_{textbox.elem_id}")
        )
        # Event for examples_hidden
        (examples_hidden
            .change(start_chat, [examples_hidden, chatbot, search_only], [examples_hidden, tabs, chatbot, sources_raw], queue=False, api_name=f"start_chat_{examples_hidden.elem_id}")
-            .then(chat, [examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
+            .then(chat, [examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs, follow_up_examples.dataset], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
            .then(finish_chat, None, [textbox], api_name=f"finish_chat_{examples_hidden.elem_id}")
        )
+        (follow_up_examples_hidden
+            .change(start_chat, [follow_up_examples_hidden, chatbot, search_only], [follow_up_examples_hidden, tabs, chatbot, sources_raw], queue=False, api_name=f"start_chat_{examples_hidden.elem_id}")
+            .then(chat, [follow_up_examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs, follow_up_examples.dataset], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
            .then(finish_chat, None, [textbox], api_name=f"finish_chat_{follow_up_examples_hidden.elem_id}")
+        )
 
    elif tab_name == "Beta - POC Adapt'Action":
        print("chat poc - message sent")
@@ -301,6 +227,11 @@ def event_handling(
            .then(chat_poc, [examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
            .then(finish_chat, None, [textbox], api_name=f"finish_chat_{examples_hidden.elem_id}")
        )
+        (follow_up_examples_hidden
+            .change(start_chat, [follow_up_examples_hidden, chatbot, search_only], [follow_up_examples_hidden, tabs, chatbot, sources_raw], queue=False, api_name=f"start_chat_{examples_hidden.elem_id}")
+            .then(chat, [follow_up_examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs, follow_up_examples.dataset], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
+            .then(finish_chat, None, [textbox], api_name=f"finish_chat_{follow_up_examples_hidden.elem_id}")
+        )
 
 
    new_sources_hmtl.change(lambda x : x, inputs = [new_sources_hmtl], outputs = [sources_textbox])
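
Note on the wiring above: `create_chat_interface` now returns a `gr.Examples` helper whose underlying `gr.Dataset` component (`follow_up_examples.dataset`) is listed as an event output, so each chat turn can refresh the suggested follow-ups. A minimal sketch of that pattern, assuming a recent Gradio version (this demo is illustrative, not part of the commit):

import gradio as gr

with gr.Blocks() as demo:
    hidden = gr.Textbox(visible=False)
    follow_ups = gr.Examples(examples=[["What is climate change?"]],
                             inputs=[hidden], label="Follow up questions")

    def answer(question):
        # Returning a gr.Dataset update replaces the samples shown
        # by the Examples helper.
        return gr.Dataset(samples=[[question + " - and then?"]])

    hidden.change(answer, inputs=[hidden], outputs=[follow_ups.dataset])

demo.launch()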
climateqa/chat.py
CHANGED

@@ -101,6 +101,7 @@ async def chat_stream(
    audience_prompt = init_audience(audience)
    sources = sources or ["IPCC", "IPBES"]
    reports = reports or []
+    relevant_history_discussion = history[-2:] if len(history) > 1 else []
 
    # Prepare inputs for agent
    inputs = {
@@ -109,7 +110,8 @@ async def chat_stream(
        "sources_input": sources,
        "relevant_content_sources_selection": relevant_content_sources_selection,
        "search_only": search_only,
-        "reports": reports
+        "reports": reports,
+        "chat_history": relevant_history_discussion,
    }
 
    # Get streaming events from agent
@@ -129,6 +131,7 @@ async def chat_stream(
    retrieved_contents = []
    answer_message_content = ""
    vanna_data = {}
+    follow_up_examples = gr.Dataset(samples=[])
 
    # Define processing steps
    steps_display = {
@@ -200,7 +203,12 @@ async def chat_stream(
                sub_questions = [q["question"] + "-> relevant sources : " + str(q["sources"]) for q in event["data"]["output"]["questions_list"]]
                history[-1].content += "Decompose question into sub-questions:\n\n - " + "\n - ".join(sub_questions)
 
-            yield history, docs_html, output_query, output_language, related_contents, graphs_html#, vanna_data
+            # Handle follow up questions
+            if event["name"] == "generate_follow_up" and event["event"] == "on_chain_end":
+                follow_up_examples = event["data"]["output"].get("follow_up_questions", [])
+                follow_up_examples = gr.Dataset(samples= [ [question] for question in follow_up_examples ])
+
+            yield history, docs_html, output_query, output_language, related_contents, graphs_html, follow_up_examples#, vanna_data
 
        except Exception as e:
            print(f"Event {event} has failed")
@@ -211,4 +219,4 @@ async def chat_stream(
    # Call the function to log interaction
    log_interaction_to_azure(history, output_query, sources, docs, share_client, user_id)
 
-    yield history, docs_html, output_query, output_language, related_contents, graphs_html#, vanna_data
+    yield history, docs_html, output_query, output_language, related_contents, graphs_html, follow_up_examples#, vanna_data
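
The `generate_follow_up` handler above keys off the agent's event stream. A hedged sketch of the consumption pattern (names as used in this diff; `app` is assumed to be the compiled LangGraph workflow, and the `version` argument is an assumption about the streaming API in use):

async for event in app.astream_events(inputs, version="v1"):
    # A node's output becomes available when its chain ends.
    if event["event"] == "on_chain_end" and event["name"] == "generate_follow_up":
        questions = event["data"]["output"].get("follow_up_questions", [])
        # One-element rows, matching the Examples component's single input.
        follow_up_examples = gr.Dataset(samples=[[q] for q in questions])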
climateqa/engine/chains/answer_rag.py
CHANGED

@@ -65,6 +65,7 @@ def make_rag_node(llm,with_docs = True):
    async def answer_rag(state,config):
        print("---- Answer RAG ----")
        start_time = time.time()
+        chat_history = state.get("chat_history",[])
        print("Sources used : " + "\n".join([x.metadata["short_name"] + " - page " + str(x.metadata["page_number"]) for x in state["documents"]]))
 
        answer = await rag_chain.ainvoke(state,config)
@@ -73,9 +74,10 @@ def make_rag_node(llm,with_docs = True):
        elapsed_time = end_time - start_time
        print("RAG elapsed time: ", elapsed_time)
        print("Answer size : ", len(answer))
-        # print(f"\n\nAnswer:\n{answer}")
 
-        return {"answer":answer}
+        chat_history.append({"question":state["query"],"answer":answer})
+
+        return {"answer":answer,"chat_history": chat_history}
 
    return answer_rag
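
Returning both keys works because a LangGraph node's return value is merged into the shared graph state, so the appended turn becomes visible to `generate_follow_up` downstream. A toy sketch of that contract (illustrative, not from the commit):

def toy_answer_node(state):
    history = state.get("chat_history", [])
    history.append({"question": state["query"], "answer": "..."})
    # Only the returned keys are updated in the graph state;
    # everything else in `state` is left untouched.
    return {"answer": "...", "chat_history": history}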
climateqa/engine/chains/follow_up.py
ADDED

@@ -0,0 +1,32 @@
+from typing import List
+from langchain.prompts import ChatPromptTemplate
+
+
+FOLLOW_UP_TEMPLATE = """Based on the previous question and answer, generate 2-3 relevant follow-up questions that would help explore the topic further.
+
+Previous Question: {user_input}
+Previous Answer: {answer}
+
+Generate short, concise, focused follow-up questions.
+You don't need a full question as it will be reformulated later as a standalone question with the context. Eg. "Details the first point"
+"""
+
+def make_follow_up_node(llm):
+    prompt = ChatPromptTemplate.from_template(FOLLOW_UP_TEMPLATE)
+
+    def generate_follow_up(state):
+        if not state.get("answer"):
+            return state
+
+        response = llm.invoke(prompt.format(
+            user_input=state["user_input"],
+            answer=state["answer"]
+        ))
+
+        # Extract questions from response
+        follow_ups = [q.strip() for q in response.content.split("\n") if q.strip()]
+        state["follow_up_questions"] = follow_ups
+
+        return state
+
+    return generate_follow_up
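
A quick way to exercise the new node in isolation — a hedged sketch assuming any LangChain chat model as `llm` (the ChatOpenAI choice below is illustrative, not from the commit):

from langchain_openai import ChatOpenAI  # hypothetical model choice
from climateqa.engine.chains.follow_up import make_follow_up_node

llm = ChatOpenAI(model="gpt-4o-mini")
generate_follow_up = make_follow_up_node(llm)

state = {
    "user_input": "What causes sea level rise?",
    "answer": "Mainly thermal expansion of seawater and melting land ice.",
}
state = generate_follow_up(state)
print(state["follow_up_questions"])  # e.g. ["How fast is it rising?", ...]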
climateqa/engine/chains/standalone_question.py
ADDED

@@ -0,0 +1,39 @@
+from langchain.prompts import ChatPromptTemplate
+
+def make_standalone_question_chain(llm):
+    prompt = ChatPromptTemplate.from_messages([
+        ("system", """You are a helpful assistant that transforms user questions into standalone questions
+        by incorporating context from the chat history if needed. The output should be a self-contained
+        question that can be understood without any additional context.
+
+        Examples:
+        Chat History: "Let's talk about renewable energy"
+        User Input: "What about solar?"
+        Output: "What are the key aspects of solar energy as a renewable energy source?"
+
+        Chat History: "What causes global warming?"
+        User Input: "And what are its effects?"
+        Output: "What are the effects of global warming on the environment and society?"
+        """),
+        ("user", """Chat History: {chat_history}
+        User Question: {question}
+
+        Transform this into a standalone question:""")
+    ])
+
+    chain = prompt | llm
+    return chain
+
+def make_standalone_question_node(llm):
+    standalone_chain = make_standalone_question_chain(llm)
+
+    def transform_to_standalone(state):
+        chat_history = state.get("chat_history", "")
+        output = standalone_chain.invoke({
+            "chat_history": chat_history,
+            "question": state["user_input"]
+        })
+        state["user_input"] = output.content
+        return state
+
+    return transform_to_standalone
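
And the companion node at the graph's new entry point — a sketch (not from the commit) of how a terse follow-up like the template's "Details the first point" becomes self-contained before intent categorization; `llm` is again any LangChain chat model:

from climateqa.engine.chains.standalone_question import make_standalone_question_node

transform = make_standalone_question_node(llm)
state = {
    "chat_history": [{"question": "What causes sea level rise?",
                      "answer": "1) thermal expansion 2) melting land ice"}],
    "user_input": "Details the first point",
}
state = transform(state)
# state["user_input"] is rewritten, e.g.
# "How does thermal expansion contribute to sea level rise?"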
climateqa/engine/graph.py
CHANGED

@@ -23,13 +23,15 @@ from .chains.retrieve_documents import make_IPx_retriever_node, make_POC_retriev
 from .chains.answer_rag import make_rag_node
 from .chains.graph_retriever import make_graph_retriever_node
 from .chains.chitchat_categorization import make_chitchat_intent_categorization_node
-
+from .chains.standalone_question import make_standalone_question_node
+from .chains.follow_up import make_follow_up_node  # Add this import
 
 class GraphState(TypedDict):
    """
    Represents the state of our graph.
    """
    user_input : str
+    chat_history : str
    language : str
    intent : str
    search_graphs_chitchat : bool
@@ -49,6 +51,7 @@ class GraphState(TypedDict):
    recommended_content : List[Document] # OWID Graphs # TODO merge with related_contents
    search_only : bool = False
    reports : List[str] = []
+    follow_up_questions: List[str] = []
 
 def dummy(state):
    return
@@ -111,6 +114,11 @@ def route_retrieve_documents(state):
        return END
    return sources_to_retrieve
 
+def route_follow_up(state):
+    if state["follow_up_questions"]:
+        return "process_follow_up"
+    return END
+
 def make_id_dict(values):
    return {k:k for k in values}
 
@@ -119,6 +127,7 @@ def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_regi
    workflow = StateGraph(GraphState)
 
    # Define the node functions
+    standalone_question_node = make_standalone_question_node(llm)
    categorize_intent = make_intent_categorization_node(llm)
    transform_query = make_query_transform_node(llm)
    translate_query = make_translation_node(llm)
@@ -130,9 +139,11 @@ def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_regi
    answer_rag = make_rag_node(llm, with_docs=True)
    answer_rag_no_docs = make_rag_node(llm, with_docs=False)
    chitchat_categorize_intent = make_chitchat_intent_categorization_node(llm)
+    generate_follow_up = make_follow_up_node(llm)
 
    # Define the nodes
    # workflow.add_node("set_defaults", set_defaults)
+    workflow.add_node("standalone_question", standalone_question_node)
    workflow.add_node("categorize_intent", categorize_intent)
    workflow.add_node("answer_climate", dummy)
    workflow.add_node("answer_search", answer_search)
@@ -146,9 +157,11 @@ def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_regi
    workflow.add_node("retrieve_documents", retrieve_documents)
    workflow.add_node("answer_rag", answer_rag)
    workflow.add_node("answer_rag_no_docs", answer_rag_no_docs)
+    workflow.add_node("generate_follow_up", generate_follow_up)
+    # workflow.add_node("process_follow_up", standalone_question_node)
 
    # Entry point
-    workflow.set_entry_point("categorize_intent")
+    workflow.set_entry_point("standalone_question")
 
    # CONDITIONAL EDGES
    workflow.add_conditional_edges(
@@ -180,20 +193,29 @@ def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_regi
        make_id_dict(["retrieve_graphs", END])
    )
 
+    # workflow.add_conditional_edges(
+    #     "generate_follow_up",
+    #     route_follow_up,
+    #     make_id_dict(["process_follow_up", END])
+    # )
+
    # Define the edges
+    workflow.add_edge("standalone_question", "categorize_intent")
    workflow.add_edge("translate_query", "transform_query")
    workflow.add_edge("transform_query", "retrieve_documents") #TODO put back
    # workflow.add_edge("transform_query", "retrieve_local_data")
    # workflow.add_edge("transform_query", END) # TODO remove
 
    workflow.add_edge("retrieve_graphs", END)
-    workflow.add_edge("answer_rag", END)
-    workflow.add_edge("answer_rag_no_docs", END)
+    workflow.add_edge("answer_rag", "generate_follow_up")
+    workflow.add_edge("answer_rag_no_docs", "generate_follow_up")
    workflow.add_edge("answer_chitchat", "chitchat_categorize_intent")
    workflow.add_edge("retrieve_graphs_chitchat", END)
 
    # workflow.add_edge("retrieve_local_data", "answer_search")
    workflow.add_edge("retrieve_documents", "answer_search")
+    workflow.add_edge("generate_follow_up",END)
+    # workflow.add_edge("process_follow_up", "categorize_intent")
 
    # Compile
    app = workflow.compile()
@@ -219,6 +241,8 @@ def make_graph_agent_poc(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_
    workflow = StateGraph(GraphState)
 
    # Define the node functions
+    standalone_question_node = make_standalone_question_node(llm)
+
    categorize_intent = make_intent_categorization_node(llm)
    transform_query = make_query_transform_node(llm)
    translate_query = make_translation_node(llm)
@@ -231,9 +255,11 @@ def make_graph_agent_poc(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_
    answer_rag = make_rag_node(llm, with_docs=True)
    answer_rag_no_docs = make_rag_node(llm, with_docs=False)
    chitchat_categorize_intent = make_chitchat_intent_categorization_node(llm)
+    generate_follow_up = make_follow_up_node(llm)
 
    # Define the nodes
    # workflow.add_node("set_defaults", set_defaults)
+    workflow.add_node("standalone_question", standalone_question_node)
    workflow.add_node("categorize_intent", categorize_intent)
    workflow.add_node("answer_climate", dummy)
    workflow.add_node("answer_search", answer_search)
@@ -249,9 +275,10 @@ def make_graph_agent_poc(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_
    workflow.add_node("retrieve_documents", retrieve_documents)
    workflow.add_node("answer_rag", answer_rag)
    workflow.add_node("answer_rag_no_docs", answer_rag_no_docs)
+    workflow.add_node("generate_follow_up", generate_follow_up)
 
    # Entry point
-    workflow.set_entry_point("categorize_intent")
+    workflow.set_entry_point("standalone_question")
 
    # CONDITIONAL EDGES
    workflow.add_conditional_edges(
@@ -284,22 +311,21 @@ def make_graph_agent_poc(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_
    )
 
    # Define the edges
+    workflow.add_edge("standalone_question", "categorize_intent")
    workflow.add_edge("translate_query", "transform_query")
    workflow.add_edge("transform_query", "retrieve_documents") #TODO put back
    workflow.add_edge("transform_query", "retrieve_local_data")
    # workflow.add_edge("transform_query", END) # TODO remove
 
    workflow.add_edge("retrieve_graphs", END)
-    workflow.add_edge("answer_rag", END)
-    workflow.add_edge("answer_rag_no_docs", END)
+    workflow.add_edge("answer_rag", "generate_follow_up")
+    workflow.add_edge("answer_rag_no_docs", "generate_follow_up")
    workflow.add_edge("answer_chitchat", "chitchat_categorize_intent")
    workflow.add_edge("retrieve_graphs_chitchat", END)
 
    workflow.add_edge("retrieve_local_data", "answer_search")
    workflow.add_edge("retrieve_documents", "answer_search")
-
-    # workflow.add_edge("transform_query", "retrieve_drias_data")
-    # workflow.add_edge("retrieve_drias_data", END)
+    workflow.add_edge("generate_follow_up",END)
 
 
    # Compile
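
Stripped of the intent routing, the shape of the new conversational loop added to both graph builders is: rewrite the turn into a standalone question first, and route every RAG answer through follow-up generation before ending. A hedged sketch reusing the names defined above (not the full wiring):

from langgraph.graph import StateGraph, END

workflow = StateGraph(GraphState)
workflow.add_node("standalone_question", make_standalone_question_node(llm))
workflow.add_node("answer_rag", make_rag_node(llm, with_docs=True))
workflow.add_node("generate_follow_up", make_follow_up_node(llm))

workflow.set_entry_point("standalone_question")          # rewrite the turn first
workflow.add_edge("standalone_question", "answer_rag")   # intent routing elided here
workflow.add_edge("answer_rag", "generate_follow_up")    # answers now feed follow-ups
workflow.add_edge("generate_follow_up", END)
app = workflow.compile()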
front/tabs/__init__.py
CHANGED

@@ -3,4 +3,7 @@ from .tab_examples import create_examples_tab
 from .tab_papers import create_papers_tab
 from .tab_figures import create_figures_tab
 from .chat_interface import create_chat_interface
-from .tab_about import create_about_tab
+from .tab_about import create_about_tab
+from .main_tab import MainTabPanel
+from .tab_config import ConfigPanel
+from .main_tab import cqa_tab
front/tabs/chat_interface.py
CHANGED

@@ -54,7 +54,10 @@ def create_chat_interface(tab):
        max_height="80vh",
        height="100vh"
    )
-
+    with gr.Row(elem_id="follow-up-examples"):
+        follow_up_examples_hidden = gr.Textbox(visible=False, elem_id="follow-up-hidden")
+        follow_up_examples = gr.Examples(examples=[ ], label="Follow up questions", inputs= [follow_up_examples_hidden], elem_id="follow-up-button", run_on_click=False)
+
    with gr.Row(elem_id="input-message"):
 
        textbox = gr.Textbox(
@@ -68,7 +71,7 @@ def create_chat_interface(tab):
 
    config_button = gr.Button("", elem_id="config-button")
 
-    return chatbot, textbox, config_button
+    return chatbot, textbox, config_button, follow_up_examples, follow_up_examples_hidden
 
front/tabs/main_tab.py
CHANGED

@@ -1,8 +1,37 @@
 import gradio as gr
+from gradio.helpers import Examples
+from typing import TypedDict
 from .chat_interface import create_chat_interface
 from .tab_examples import create_examples_tab
 from .tab_papers import create_papers_tab
 from .tab_figures import create_figures_tab
+from dataclasses import dataclass
+
+@dataclass
+class MainTabPanel:
+    chatbot: gr.Chatbot
+    textbox: gr.Textbox
+    tabs: gr.Tabs
+    sources_raw: gr.State
+    new_figures: gr.State
+    current_graphs: gr.State
+    examples_hidden: gr.State
+    sources_textbox: gr.HTML
+    figures_cards: gr.HTML
+    gallery_component: gr.Gallery
+    config_button: gr.Button
+    papers_direct_search: gr.TextArea
+    papers_html: gr.HTML
+    citations_network: gr.Plot
+    papers_summary: gr.Textbox
+    tab_recommended_content: gr.Tab
+    tab_sources: gr.Tab
+    tab_figures: gr.Tab
+    tab_graphs: gr.Tab
+    tab_papers: gr.Tab
+    graph_container: gr.HTML
+    follow_up_examples : Examples
+    follow_up_examples_hidden : gr.Textbox
 
 def cqa_tab(tab_name):
    # State variables
@@ -11,14 +40,14 @@ def cqa_tab(tab_name):
    with gr.Row(elem_id="chatbot-row"):
        # Left column - Chat interface
        with gr.Column(scale=2):
-            chatbot, textbox, config_button = create_chat_interface(tab_name)
+            chatbot, textbox, config_button, follow_up_examples, follow_up_examples_hidden = create_chat_interface(tab_name)
 
        # Right column - Content panels
        with gr.Column(scale=2, variant="panel", elem_id="right-panel"):
            with gr.Tabs(elem_id="right_panel_tab") as tabs:
                # Examples tab
                with gr.TabItem("Examples", elem_id="tab-examples", id=0):
-                    examples_hidden = create_examples_tab(tab_name)
+                    examples_hidden = create_examples_tab(tab_name)
 
                # Sources tab
                with gr.Tab("Sources", elem_id="tab-sources", id=1) as tab_sources:
@@ -34,7 +63,7 @@ def cqa_tab(tab_name):
 
                    # Papers subtab
                    with gr.Tab("Papers", elem_id="tab-citations", id=4) as tab_papers:
-                        papers_summary, papers_html, citations_network, papers_modal = create_papers_tab()
+                        papers_direct_search, papers_summary, papers_html, citations_network, papers_modal = create_papers_tab()
 
                    # Graphs subtab
                    with gr.Tab("Graphs", elem_id="tab-graphs", id=5) as tab_graphs:
@@ -42,27 +71,30 @@ def cqa_tab(tab_name):
                            "<h2>There are no graphs to be displayed at the moment. Try asking another question.</h2>",
                            elem_id="graphs-container"
                        )
-
-    return {
-        "chatbot": chatbot,
-        "textbox": textbox,
-        "tabs": tabs,
-        "sources_raw": sources_raw,
-        "new_figures": new_figures,
-        "current_graphs": current_graphs,
-        "examples_hidden": examples_hidden,
-        "sources_textbox": sources_textbox,
-        "figures_cards": figures_cards,
-        "gallery_component": gallery_component,
-        "config_button": config_button,
-        "papers_direct_search" : papers_direct_search,
-        "papers_html": papers_html,
-        "citations_network": citations_network,
-        "papers_summary": papers_summary,
-        "tab_recommended_content": tab_recommended_content,
-        "tab_sources": tab_sources,
-        "tab_figures": tab_figures,
-        "tab_graphs": tab_graphs,
-        "tab_papers": tab_papers,
-        "graph_container": graphs_container,
-    }
+
+
+    return MainTabPanel(
+        chatbot=chatbot,
+        textbox=textbox,
+        tabs=tabs,
+        sources_raw=sources_raw,
+        new_figures=new_figures,
+        current_graphs=current_graphs,
+        examples_hidden=examples_hidden,
+        sources_textbox=sources_textbox,
+        figures_cards=figures_cards,
+        gallery_component=gallery_component,
+        config_button=config_button,
+        papers_direct_search=papers_direct_search,
+        papers_html=papers_html,
+        citations_network=citations_network,
+        papers_summary=papers_summary,
+        tab_recommended_content=tab_recommended_content,
+        tab_sources=tab_sources,
+        tab_figures=tab_figures,
+        tab_graphs=tab_graphs,
+        tab_papers=tab_papers,
+        graph_container=graphs_container,
+        follow_up_examples= follow_up_examples,
+        follow_up_examples_hidden = follow_up_examples_hidden
+    )
front/tabs/tab_config.py
CHANGED

@@ -2,8 +2,10 @@ import gradio as gr
 from gradio_modal import Modal
 from climateqa.constants import POSSIBLE_REPORTS
 from typing import TypedDict
+from dataclasses import dataclass
 
-class ConfigPanel(TypedDict):
+@dataclass
+class ConfigPanel:
    config_open: gr.State
    config_modal: Modal
    dropdown_sources: gr.CheckboxGroup
@@ -14,6 +16,7 @@ class ConfigPanel(TypedDict):
    after: gr.Slider
    output_query: gr.Textbox
    output_language: gr.Textbox
+    close_config_modal_button: gr.Button
 
 
 def create_config_modal():
@@ -95,29 +98,16 @@ def create_config_modal():
 
    close_config_modal_button = gr.Button("Validate and Close", elem_id="close-config-modal")
 
-    return {
-        "config_open" : config_open,
-        "config_modal": config_modal,
-        "dropdown_sources": dropdown_sources,
-        "dropdown_reports": dropdown_reports,
-        "dropdown_external_sources": dropdown_external_sources,
-        "search_only": search_only,
-        "dropdown_audience": dropdown_audience,
-        "after": after,
-        "output_query": output_query,
-        "output_language": output_language,
-        "close_config_modal_button": close_config_modal_button
-    }
+    return ConfigPanel(
+        config_open=config_open,
+        config_modal=config_modal,
+        dropdown_sources=dropdown_sources,
+        dropdown_reports=dropdown_reports,
+        dropdown_external_sources=dropdown_external_sources,
+        search_only=search_only,
+        dropdown_audience=dropdown_audience,
+        after=after,
+        output_query=output_query,
+        output_language=output_language,
+        close_config_modal_button=close_config_modal_button
+    )
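
The motivation for swapping the TypedDict-plus-dict return for a dataclass, in miniature (an illustrative sketch, not from the commit): construction is validated up front and access is attribute-based, so wiring typos fail loudly instead of at lookup time.

from dataclasses import dataclass

@dataclass
class Panel:
    output_query: str
    output_language: str

panel = Panel(output_query="q", output_language="en")
print(panel.output_query)        # attribute access, IDE-completable
# Panel(output_query="q")        # TypeError: missing 'output_language'
# a plain dict accepts any keys and fails only when a key is looked up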
style.css
CHANGED

@@ -115,6 +115,11 @@ main.flex.flex-1.flex-col {
    border-radius: 40px;
    padding-left: 30px;
    resize: none;
+    background-color: #f0f8ff; /* Light blue background */
+    border: 2px solid #4b8ec3; /* Blue border */
+    font-size: 16px; /* Increase font size */
+    color: #333; /* Text color */
+    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Add shadow */
 }
 
 #input-message > div {
@@ -479,11 +484,6 @@ a {
    height: 15vh;
    overflow-y: auto;
    padding: 10px 0;
-    display: none; /* Hide by default */
-}
-
-#follow-up-examples:not(:empty) {
-    display: block; /* Show only when there's content */
 }
 
 #follow-up-button {
@@ -518,17 +518,14 @@ a {
 }
 
 div#chatbot {
-    height: 80vh !important; /* Increased height when no follow-ups */
-    max-height: 80vh !important;
-    transition: height 0.3s ease;
-}
-
-/* Adjust chatbot height when follow-up examples are present */
-div#chatbot:has(+ #follow-up-examples:not(:empty)) {
    height: 65vh !important;
    max-height: 65vh !important;
 }
 
+div#chatbot-row {
+    max-height: calc(100vh - 90px) !important;
+}
+
 div#graphs-container {
    height: calc(100vh - 210px) !important;
    overflow-y: scroll !important;
@@ -543,13 +540,7 @@ a {
 /* Mobile Media Query */
 @media screen and (max-width: 767px) {
    div#chatbot {
-        height: 500px !important;
-        transition: height 0.3s ease;
-    }
-
-    /* Adjust chatbot height when follow-up examples are present */
-    div#chatbot:has(+ #follow-up-examples:not(:empty)) {
-        height: 400px !important;
+        height: 400px !important; /* Reduced from 500px */
    }
 
    #follow-up-examples