Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -26,6 +26,8 @@ class ConversationManager:
         formatted_response = self.format_response(model_response)
         self.model_history.append((user_message, model_response))
         self.user_history.append((user_message, formatted_response))
+        print(f"\nModel History Updated: {self.model_history}")
+        print(f"\nUser History Updated: {self.user_history}")
 
     def format_response(self, response):
         """Format response for UI while keeping raw text for model."""
@@ -57,6 +59,8 @@ def stream_chat(
     print(f'User Message: {message}')
 
     model_history = conversation_manager.get_model_history()
+    print(f'Model History: {model_history}')
+
     conversation = []
     for prompt, answer in model_history:
         conversation.extend([
@@ -65,6 +69,8 @@ def stream_chat(
         ])
     conversation.append({"role": "user", "content": message})
 
+    print(f'Formatted Conversation for Model: {conversation}')
+
     input_ids = tokenizer.apply_chat_template(
         conversation, add_generation_prompt=True, return_tensors="pt"
     ).to(model.device)
@@ -95,6 +101,7 @@ def stream_chat(
     for new_text in streamer:
         buffer += new_text
         original_response += new_text
+        print(f'Streaming: {buffer}')
         yield conversation_manager.format_response(buffer)
 
     conversation_manager.add_exchange(message, original_response)
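The first hunk instruments one half of a dual-history pattern: ConversationManager keeps model_history with the raw exchanges fed back to the model, and user_history with the UI-formatted copies. A minimal sketch of the class as it can be reconstructed from the lines visible in the diff; the __init__ body, get_model_history, and the body of format_response do not appear in the diff and are assumptions here:

import html


class ConversationManager:
    def __init__(self):
        # Assumed initializer: the diff only shows these attributes being appended to.
        self.model_history = []  # raw (user, response) pairs fed back to the model
        self.user_history = []   # (user, formatted) pairs shown in the UI

    def add_exchange(self, user_message, model_response):
        # Body taken from the diff, including the two new debug prints.
        formatted_response = self.format_response(model_response)
        self.model_history.append((user_message, model_response))
        self.user_history.append((user_message, formatted_response))
        print(f"\nModel History Updated: {self.model_history}")
        print(f"\nUser History Updated: {self.user_history}")

    def format_response(self, response):
        """Format response for UI while keeping raw text for model."""
        # Hypothetical formatting; the real implementation is not in the diff.
        return html.escape(response).replace("\n", "<br>")

    def get_model_history(self):
        # Assumed accessor, used by stream_chat in the diff.
        return self.model_history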
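The remaining hunks instrument stream_chat, which rebuilds the chat-template conversation from the raw history, tokenizes it with apply_chat_template, and yields a formatted partial buffer for each streamed chunk. A sketch of that flow, assuming the usual transformers TextIteratorStreamer plus background-thread pattern; the real stream_chat signature, the generation settings (max_new_tokens=512 here), and the two role dicts elided between hunks are assumptions, with only the diffed lines taken from app.py:

from threading import Thread

from transformers import TextIteratorStreamer


def stream_chat(message, conversation_manager, model, tokenizer):
    print(f'User Message: {message}')

    model_history = conversation_manager.get_model_history()
    print(f'Model History: {model_history}')

    # Rebuild the chat-template conversation from raw history pairs.
    conversation = []
    for prompt, answer in model_history:
        conversation.extend([
            # These two dicts are elided in the diff; the conventional roles are assumed.
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": answer},
        ])
    conversation.append({"role": "user", "content": message})
    print(f'Formatted Conversation for Model: {conversation}')

    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # generate() runs in a background thread so this one can consume
    # the streamer incrementally.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    Thread(target=model.generate, kwargs=dict(
        input_ids=input_ids, streamer=streamer, max_new_tokens=512
    )).start()

    buffer = ""
    original_response = ""
    for new_text in streamer:
        buffer += new_text
        original_response += new_text
        print(f'Streaming: {buffer}')
        yield conversation_manager.format_response(buffer)

    # Raw text goes into history; formatting happens inside add_exchange.
    conversation_manager.add_exchange(message, original_response)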