Update app.py
app.py CHANGED

@@ -3,6 +3,7 @@ import gradio as gr
 import os
 from PIL import Image
 import spaces
+import copy
 
 from kimi_vl.serve.frontend import reload_javascript
 from kimi_vl.serve.utils import (
@@ -88,7 +89,15 @@ def get_prompt(conversation) -> str:
     system_prompt = conversation.system_template.format(system_message=conversation.system_message)
     return system_prompt
 
+def highlight_thinking(msg: str) -> str:
+    msg = copy.deepcopy(msg)
+    if "◁think▷" in msg:
+        msg = msg.replace("◁think▷", "<b style='color:blue;'>🤔Thinking...</b>\n")
+    if "◁/think▷" in msg:
+        msg = msg.replace("◁/think▷", "\n<b style='color:purple;'>💡Summary</b>\n")
 
+    return msg
+
 @wrap_gen_fn
 @spaces.GPU(duration=180)
 def predict(
@@ -168,7 +177,7 @@ def predict(
         full_response += x
         response = strip_stop_words(full_response, stop_words)
         conversation.update_last_message(response)
-        gradio_chatbot_output[-1][1] = response
+        gradio_chatbot_output[-1][1] = highlight_thinking(response)
 
         yield gradio_chatbot_output, to_gradio_history(conversation), "Generating..."
 
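For context, a minimal standalone sketch of what the new helper does to a streamed response. The function body mirrors the diff above; the sample string and the main harness are illustrative only and not part of the commit:

import copy


def highlight_thinking(msg: str) -> str:
    # Wrap the model's ◁think▷ ... ◁/think▷ markers in simple HTML labels
    # so the reasoning trace and the final answer are visually separated
    # in the gradio chatbot.
    msg = copy.deepcopy(msg)
    if "◁think▷" in msg:
        msg = msg.replace("◁think▷", "<b style='color:blue;'>🤔Thinking...</b>\n")
    if "◁/think▷" in msg:
        msg = msg.replace("◁/think▷", "\n<b style='color:purple;'>💡Summary</b>\n")
    return msg


if __name__ == "__main__":
    # An illustrative partial response of the kind predict() streams.
    sample = "◁think▷The user asks about the image.◁/think▷The image shows a cat."
    print(highlight_thinking(sample))
    # Prints:
    # <b style='color:blue;'>🤔Thinking...</b>
    # The user asks about the image.
    # <b style='color:purple;'>💡Summary</b>
    # The image shows a cat.

Note that copy.deepcopy on a Python str is effectively a no-op: strings are immutable and str.replace already returns a new object, so the copy is defensive rather than required.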