Update app.py
app.py CHANGED
@@ -1,8 +1,6 @@
-# ref: https://github.com/kram254/Mixture-of-Agents-running-on-Groq/tree/main
 import streamlit as st
 import json
-import
-from typing import Union, Iterable, AsyncIterable
+from typing import Iterable
 from moa.agent import MOAgent
 from moa.agent.moa import ResponseChunk
 from streamlit_ace import st_ace
@@ -29,9 +27,11 @@ layer_agent_config_def = {
         "system_prompt": "You are an expert at logic and reasoning. Always take a logical approach to the answer. {helper_response}",
         "model_name": "llama3-8b-8192"
     },
+
 }
 
-# Recommended
+# Recommended Configuration
+
 rec_config = {
     "main_model": "llama3-70b-8192",
     "cycles": 2,
@@ -61,58 +61,30 @@ layer_agent_config_rec = {
     },
 }
 
-# Unified streaming function to handle async and sync responses
-async def stream_or_async_response(messages: Union[Iterable[ResponseChunk], AsyncIterable[ResponseChunk]]):
-    """
-    Handles both synchronous and asynchronous message streams.
-    Processes each response chunk and manages intermediate outputs.
-    :param messages: Union[Iterable, AsyncIterable]
-    """
-    layer_outputs = {}
-
-    async def process_message(message):
-        """
-        Process an individual message and return the content for streaming.
-        Returns an empty string if the message type is intermediate or invalid.
-        """
-        # Debug: Log the incoming message
-        st.write(f"Processing message: {message}")
-
-        if message['response_type'] == 'intermediate':
-            layer = message['metadata']['layer']
-            if layer not in layer_outputs:
-                layer_outputs[layer] = []
-            layer_outputs[layer].append(message['delta'])
-            return ""
-
-        # Final message processing
-        for layer, outputs in layer_outputs.items():
-            st.write(f"Layer {layer}")
-            cols = st.columns(len(outputs))
-            for i, output in enumerate(outputs):
-                with cols[i]:
-                    st.expander(label=f"Agent {i+1}", expanded=False).write(output)
-        return message['delta']
-
-    # Check if the input is an async or sync iterable
-    if hasattr(messages, "__aiter__"):  # Asynchronous iterable
-        async for message in messages:
-            resolved_message = await process_message(message)
-            yield resolved_message or ""  # Yield empty string if None
-    elif hasattr(messages, "__iter__"):  # Synchronous iterable
-        for message in messages:
-            resolved_message = process_message(message)  # Do not `await` sync messages
-            yield resolved_message or ""  # Yield empty string if None
-    else:
-        raise TypeError("'messages' must be an Iterable or AsyncIterable.")
-
+
+def stream_response(messages: Iterable[ResponseChunk]):
+    layer_outputs = {}
+    for message in messages:
+        if message['response_type'] == 'intermediate':
+            layer = message['metadata']['layer']
+            if layer not in layer_outputs:
+                layer_outputs[layer] = []
+            layer_outputs[layer].append(message['delta'])
+        else:
+            # Display accumulated layer outputs
+            for layer, outputs in layer_outputs.items():
+                st.write(f"Layer {layer}")
+                cols = st.columns(len(outputs))
+                for i, output in enumerate(outputs):
+                    with cols[i]:
+                        st.expander(label=f"Agent {i+1}", expanded=False).write(output)
+
+            # Clear layer outputs for the next iteration
+            layer_outputs = {}
+
+            # Yield the main agent's output
+            yield message['delta']
 
-# Set up the MOAgent
 def set_moa_agent(
     main_model: str = default_config['main_model'],
     cycles: int = default_config['cycles'],
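The new `stream_response` is a plain generator: intermediate chunks are buffered per layer, rendered in expanders once a non-intermediate chunk arrives, and only the main agent's deltas are yielded downstream. A minimal sketch of how a generator of this shape drives `st.write_stream`, assuming `ResponseChunk` dicts carry the `response_type`, `delta`, and `metadata` keys the code reads (the stand-in chunks below are illustrative, not the app's real output):

```python
import streamlit as st

def fake_chunks():
    # Illustrative stand-in for moa_agent.chat(...): two intermediate
    # layer-agent chunks, then the main agent's final output chunk.
    yield {'response_type': 'intermediate', 'delta': 'draft A', 'metadata': {'layer': 1}}
    yield {'response_type': 'intermediate', 'delta': 'draft B', 'metadata': {'layer': 1}}
    yield {'response_type': 'output', 'delta': 'final answer', 'metadata': {}}

def demo_stream(messages):
    # Same shape as stream_response above: only non-intermediate deltas
    # reach the UI stream; intermediate ones are shown separately.
    for message in messages:
        if message['response_type'] != 'intermediate':
            yield message['delta']

# st.write_stream consumes the generator, renders chunks as they arrive,
# and returns the concatenated text (which the app saves to session state).
full_text = st.write_stream(demo_stream(fake_chunks()))
```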
@@ -122,18 +94,26 @@ def set_moa_agent(
 ):
     if override or ("main_model" not in st.session_state):
         st.session_state.main_model = main_model
+    else:
+        if "main_model" not in st.session_state: st.session_state.main_model = main_model
 
     if override or ("cycles" not in st.session_state):
         st.session_state.cycles = cycles
+    else:
+        if "cycles" not in st.session_state: st.session_state.cycles = cycles
 
     if override or ("layer_agent_config" not in st.session_state):
         st.session_state.layer_agent_config = layer_agent_config
+    else:
+        if "layer_agent_config" not in st.session_state: st.session_state.layer_agent_config = layer_agent_config
 
     if override or ("main_temp" not in st.session_state):
         st.session_state.main_temp = main_model_temperature
+    else:
+        if "main_temp" not in st.session_state: st.session_state.main_temp = main_model_temperature
 
     cls_ly_conf = copy.deepcopy(st.session_state.layer_agent_config)
-
+
     if override or ("moa_agent" not in st.session_state):
         st.session_state.moa_agent = MOAgent.from_config(
             main_model=st.session_state.main_model,
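`set_moa_agent` relies on the pattern `override or (key not in st.session_state)`: a session value is written on the first run and rewritten only when the caller forces an update. A small equivalent helper to make the pattern explicit (`init_state` is hypothetical, not part of the app):

```python
import streamlit as st

def init_state(key: str, value, override: bool = False):
    # Write the value on first run, or whenever the caller forces an update.
    if override or (key not in st.session_state):
        st.session_state[key] = value

init_state("main_model", "llama3-70b-8192")                # set only if missing
init_state("main_model", "llama3-8b-8192", override=True)  # force-overwrite
```

When `override` is false and the key already exists, no write is needed at all, which is why the single guarded assignment suffices.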
@@ -145,10 +125,9 @@ def set_moa_agent(
     del cls_ly_conf
     del layer_agent_config
 
-# Streamlit app layout
 st.set_page_config(
     page_title="Mixture of Agents",
-
+    menu_items={
         'About': "## Groq Mixture-Of-Agents \n Powered by [Groq](https://groq.com)"
     },
     layout="wide"
@@ -161,9 +140,6 @@ valid_model_names = [
     'mixtral-8x7b-32768'
 ]
 
-#st.markdown("<a href='https://groq.com'><img src='app/static/banner.png' width='500'></a>", unsafe_allow_html=True)
-st.write("---")
-
 # Initialize session state
 if "messages" not in st.session_state:
     st.session_state.messages = []
@@ -172,6 +148,7 @@ set_moa_agent()
 
 # Sidebar for configuration
 with st.sidebar:
+    # config_form = st.form("Agent Configuration", border=False)
     st.title("MOA Configuration")
     with st.form("Agent Configuration", border=False):
         if st.form_submit_button("Use Recommended Config"):
@@ -184,9 +161,10 @@ with st.sidebar:
                 )
                 st.session_state.messages = []
                 st.success("Configuration updated successfully!")
+            except json.JSONDecodeError:
+                st.error("Invalid JSON in Layer Agent Configuration. Please check your input.")
             except Exception as e:
                 st.error(f"Error updating configuration: {str(e)}")
-
         # Main model selection
         new_main_model = st.selectbox(
             "Select Main Model",
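Catching `json.JSONDecodeError` before the generic `except Exception` gives users a specific message when the config editor contains malformed JSON. A self-contained sketch of that ordering (the sample string is deliberately broken):

```python
import json
import streamlit as st

raw_config = '{"layer_1": {"model_name": "llama3-8b-8192"}'  # missing closing brace
try:
    layer_agent_config = json.loads(raw_config)
    st.success("Configuration updated successfully!")
except json.JSONDecodeError:
    # Raised for malformed JSON, before the generic handler can swallow it.
    st.error("Invalid JSON in Layer Agent Configuration. Please check your input.")
except Exception as e:
    st.error(f"Error updating configuration: {str(e)}")
```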
@@ -212,6 +190,8 @@ with st.sidebar:
         )
 
         # Layer agent configuration
+        tooltip = "Agents in the layer agent configuration run in parallel _per cycle_. Each layer agent supports all initialization parameters of [Langchain's ChatGroq](https://api.python.langchain.com/en/latest/chat_models/langchain_groq.chat_models.ChatGroq.html) class as valid dictionary fields."
+        st.markdown("Layer Agent Config", help=tooltip)
         new_layer_agent_config = st_ace(
             value=json.dumps(st.session_state.layer_agent_config, indent=2),
             language='json',
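Per the tooltip, every entry in the layer agent config is one agent, all agents run in parallel on each cycle, and any extra dictionary fields are forwarded as ChatGroq initialization parameters. A hypothetical config illustrating this (the agent names and the `temperature` value are examples, not the app's defaults):

```python
example_layer_agent_config = {
    "thinking_agent": {
        "system_prompt": "Think through your response step by step. {helper_response}",
        "model_name": "llama3-8b-8192",
        "temperature": 0.3,  # extra field, passed through to ChatGroq
    },
    "logic_agent": {
        "system_prompt": "You are an expert at logic and reasoning. Always take a logical approach to the answer. {helper_response}",
        "model_name": "mixtral-8x7b-32768",
    },
}
```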
@@ -233,20 +213,31 @@ with st.sidebar:
                 )
                 st.session_state.messages = []
                 st.success("Configuration updated successfully!")
+            except json.JSONDecodeError:
+                st.error("Invalid JSON in Layer Agent Configuration. Please check your input.")
             except Exception as e:
                 st.error(f"Error updating configuration: {str(e)}")
 
+    st.markdown("---")
+    st.markdown("""
+    ### Credits
+    - MOA: [Together AI](https://www.together.ai/blog/together-moa)
+    - LLMs: [Groq](https://groq.com/)
+    - Paper: [arXiv:2406.04692](https://arxiv.org/abs/2406.04692)
+    """)
+
 # Main app layout
-st.header("
-st.write("
+st.header("Karios Agents", anchor=False)
+st.write("This project implements the Mixture of Agents architecture, powered by Groq LLMs.")
+# st.image("./static/moa_groq.svg", caption="Mixture of Agents Workflow", width=1000)
 
 # Display current configuration
 with st.expander("Current MOA Configuration", expanded=False):
-    st.markdown(f"**Main Model**:
-    st.markdown(f"**Main Model Temperature**:
-    st.markdown(f"**Layers**:
-    st.markdown("**Layer Agents Config
-    st_ace(
+    st.markdown(f"**Main Model**: ``{st.session_state.main_model}``")
+    st.markdown(f"**Main Model Temperature**: ``{st.session_state.main_temp:.1f}``")
+    st.markdown(f"**Layers**: ``{st.session_state.cycles}``")
+    st.markdown(f"**Layer Agents Config**:")
+    new_layer_agent_config = st_ace(
         value=json.dumps(st.session_state.layer_agent_config, indent=2),
         language='json',
         placeholder="Layer Agent Configuration (JSON)",
@@ -262,53 +253,14 @@ for message in st.session_state.messages:
         st.markdown(message["content"])
 
 if query := st.chat_input("Ask a question"):
-    st.session_state.messages.append({"role": "user", "content": query})
-    with st.chat_message("user"):
-        st.write(query)
-
-    moa_agent: MOAgent = st.session_state.moa_agent
-    with st.chat_message("assistant"):
-        message_placeholder = st.empty()
-
-    async def handle_query():
-        # Get messages from MOAgent (supports async streaming)
-        messages = moa_agent.chat(query, output_format='json')
-
-        try:
-            # Stream and display responses from `stream_or_async_response`
-            final_response = ""
-            async for response in stream_or_async_response(messages):
-                # Skip None or empty responses
-                if response is not None and response.strip():
-                    final_response += response
-                    message_placeholder.markdown(final_response)
-                else:
-                    # Debug: Log skipped empty/None responses
-                    st.write(f"Skipped empty or None response: {response}")
-
-            # Save the final response to session state
-            if final_response.strip():  # Only save non-empty responses
-                st.session_state.messages.append({"role": "assistant", "content": final_response})
-            else:
-                st.error("Received no valid response from the assistant.")
-        except Exception as e:
-            st.error(f"Error processing response: {e}")
-            # Debug: Log the exception
-            st.write(f"Exception details: {e}")
-
-    # Run the asynchronous handle_query function
-    asyncio.run(handle_query())
-
-# Add acknowledgment at the bottom
-st.markdown("---")
-st.markdown("""
-###
-This app is based on [Emmanuel M. Ndaliro's work](https://github.com/kram254/Mixture-of-Agents-running-on-Groq/tree/main).
-""")
+    st.session_state.messages.append({"role": "user", "content": query})
+    with st.chat_message("user"):
+        st.write(query)
+
+    moa_agent: MOAgent = st.session_state.moa_agent
+    with st.chat_message("assistant"):
+        message_placeholder = st.empty()
+        ast_mess = stream_response(moa_agent.chat(query, output_format='json'))
+        response = st.write_stream(ast_mess)
+
+    st.session_state.messages.append({"role": "assistant", "content": response})
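The chat handler's round trip can also be exercised outside Streamlit. A hedged sketch based only on the calls visible in this diff, `MOAgent.from_config(...)` and `chat(query, output_format='json')`; keyword arguments beyond `main_model` are assumed to mirror the session-state keys used above:

```python
from moa.agent import MOAgent

# Assumed to accept the same fields the app stores in session state
# (cycles and layer_agent_config left commented because their exact
# signatures are not visible in this diff).
agent = MOAgent.from_config(
    main_model="llama3-70b-8192",
    # cycles=2,
    # layer_agent_config={...},
)

for chunk in agent.chat("What is the capital of France?", output_format='json'):
    if chunk['response_type'] == 'intermediate':
        continue  # layer-agent drafts; the app renders these in expanders
    print(chunk['delta'], end="")
```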