Commit 912e356 (parent: fa3c0c0): test

Files changed:
- AIapp.py (+97, -0)
- Gradio_UI.py (+17, -299)
- HugginFaceUI.py (+20, -0)
- __pycache__/Gradio_UI.cpython-312.pyc (binary, changed)
- __pycache__/app.cpython-312.pyc (binary, added)
- app.py (+70, -26)
- info/info.json (+1, -1)
AIapp.py (ADDED)
@@ -0,0 +1,97 @@
+import json
+import random
+from smolagents import TransformersModel
+from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
+import datetime
+import requests
+import pytz
+import yaml
+import numpy as np
+
+from tools.final_answer import FinalAnswerTool
+from tools.visit_webpage import VisitWebpageTool
+from tools.web_search import DuckDuckGoSearchTool
+from typing import Optional, Tuple
+
+from Gradio_UI import GradioUI
+
+@tool
+def provide_my_information(query: str) -> str:
+    """
+    Provide information about me (Tianqing LIU) based on the user's query.
+
+    Args:
+        query: The user's question or request for information.
+
+    Returns:
+        str: A response containing the requested information.
+    """
+    # Convert the query to lowercase for case-insensitive matching
+    query = query.lower()
+    my_info = None
+    with open("info/info.json", 'r') as file:
+        my_info = json.load(file)
+    # Check for specific keywords in the query and return the corresponding information
+    if "who" in query or "about" in query or "introduce" in query or "presentation" in query:
+        return f"{my_info['introduction']}"
+    elif "name" in query:
+        return f"My name is {my_info['name']}."
+    elif "location" in query:
+        return f"I am located in {my_info['location']}."
+    elif "occupation" in query or "job" in query or "work" in query:
+        return f"I work as a {my_info['occupation']}."
+    elif "education" in query or "educational" in query:
+        return f"I have a {my_info['education']}."
+    elif "skills" in query or "what can you do" in query:
+        return f"My skills include: {', '.join(my_info['skills'])}."
+    elif "hobbies" in query or "interests" in query:
+        return f"My hobbies are: {', '.join(my_info['hobbies'])}."
+    elif "contact" in query or "email" in query or "linkedin" in query or "github" in query or "cv" in query or "resume" in query:
+        contact_info = my_info["contact"]
+        return (
+            f"You can contact me via email at {contact_info['email']}, "
+            f"connect with me on LinkedIn at {contact_info['linkedin']}, "
+            f"check out my GitHub profile at {contact_info['github']}, "
+            f"or visit my website at {contact_info['website']}."
+        )
+    else:
+        return "I'm sorry, I don't have information on that. Please ask about my name, location, occupation, education, skills, hobbies, or contact details."
+
+
+final_answer = FinalAnswerTool()
+visit_webpage = VisitWebpageTool()
+web_search = DuckDuckGoSearchTool()
+
+# If the agent does not answer, the model is overloaded; use another model or the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
+model_id = 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
+# model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct", max_tokens=1025)
+model = HfApiModel(
+    max_tokens=2096,
+    temperature=0.5,
+    # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
+    model_id=model_id,
+    # it is possible that this model may be overloaded
+    custom_role_conversions=None,
+)
+
+
+# Import tool from Hub
+# image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+
+with open("prompts.yaml", 'r') as stream:
+    prompt_templates = yaml.safe_load(stream)
+
+agent = CodeAgent(
+    model=model,
+    tools=[final_answer, provide_my_information],  # add your tools here (don't remove final_answer)
+    max_steps=1,
+    verbosity_level=1,
+    grammar=None,
+    planning_interval=None,
+    name=None,
+    description=None,
+    prompt_templates=prompt_templates
+)
+
+
+GradioUI(agent).launch()
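Note: every branch of provide_my_information assumes a matching key in info/info.json. A minimal sketch of the shape the code expects, written as a plain Python dict; the github and website URLs are taken from the info/info.json diff below, while the remaining values are placeholders rather than the real file contents:

import json
import os

# Hypothetical stand-in for info/info.json: the keys the tool reads, with placeholder values.
sample_info = {
    "introduction": "One-paragraph self-introduction, returned for who/about/introduce queries.",
    "name": "Tianqing Liu",
    "location": "Paris, France",
    "occupation": "Product Manager",
    "education": "Master's degree in Business Informatics",
    "skills": ["Java", "PL/SQL", "Python"],
    "hobbies": ["placeholder hobby"],
    "contact": {
        "email": "placeholder@example.com",
        "linkedin": "https://www.linkedin.com/in/placeholder",
        "github": "https://liuti-ma.github.io/CV/",
        "website": "https://www.liutianqing.com",
    },
}

os.makedirs("info", exist_ok=True)
with open("info/info.json", "w") as file:
    json.dump(sample_info, file, ensure_ascii=False, indent=2)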
Gradio_UI.py (CHANGED)
@@ -1,306 +1,24 @@
-
-# coding=utf-8
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import mimetypes
-import os
-import re
-import shutil
-from typing import Optional
-
-from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
-from smolagents.agents import ActionStep, MultiStepAgent
-from smolagents.memory import MemoryStep
-from smolagents.utils import _is_package_available
-
-
-def pull_messages_from_step(
-    step_log: MemoryStep,
-):
-    """Extract ChatMessage objects from agent steps with proper nesting"""
-    import gradio as gr
-
-    if isinstance(step_log, ActionStep):
-        # Output the step number
-        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
-        yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
-
-        # For tool calls, create a parent message
-        if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
-            first_tool_call = step_log.tool_calls[0]
-            used_code = first_tool_call.name == "python_interpreter"
-            parent_id = f"call_{len(step_log.tool_calls)}"
-
-            # Tool call becomes the parent message with timing info
-            # First we will handle arguments based on type
-            args = first_tool_call.arguments
-            if isinstance(args, dict):
-                content = str(args.get("answer", str(args)))
-            else:
-                content = str(args).strip()
-
-            if used_code:
-                # Clean up the content by removing any end code tags
-                content = re.sub(r"```.*?\n", "", content)  # Remove existing code blocks
-                content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
-                content = content.strip()
-                if not content.startswith("```python"):
-                    content = f"```python\n{content}\n```"
-
-            parent_message_tool = gr.ChatMessage(
-                role="assistant",
-                content=content,
-                metadata={
-                    "title": f"🛠️ Used tool {first_tool_call.name}",
-                    "id": parent_id,
-                    "status": "pending",
-                },
-            )
-            yield parent_message_tool
-
-            # Nesting execution logs under the tool call if they exist
-            if hasattr(step_log, "observations") and (
-                step_log.observations is not None and step_log.observations.strip()
-            ):  # Only yield execution logs if there's actual content
-                log_content = step_log.observations.strip()
-                if log_content:
-                    log_content = re.sub(r"^Execution logs:\s*", "", log_content)
-                    yield gr.ChatMessage(
-                        role="assistant",
-                        content=f"{log_content}",
-                        metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
-                    )
-
-            # Nesting any errors under the tool call
-            if hasattr(step_log, "error") and step_log.error is not None:
-                yield gr.ChatMessage(
-                    role="assistant",
-                    content=str(step_log.error),
-                    metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
-                )
-
-            # Update parent message metadata to done status without yielding a new message
-            parent_message_tool.metadata["status"] = "done"
-
-        # Handle standalone errors but not from tool calls
-        elif hasattr(step_log, "error") and step_log.error is not None:
-            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
-
-        # Calculate duration and token information
-        step_footnote = f"{step_number}"
-        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
-            token_str = (
-                f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
-            )
-            step_footnote += token_str
-        if hasattr(step_log, "duration"):
-            step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
-            step_footnote += step_duration
-        step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
-        yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
-        yield gr.ChatMessage(role="assistant", content="-----")
-
-
-def stream_to_gradio(
-    agent,
-    task: str,
-    reset_agent_memory: bool = False,
-    additional_args: Optional[dict] = None,
-):
-    """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
-    if not _is_package_available("gradio"):
-        raise ModuleNotFoundError(
-            "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
-        )
-    import gradio as gr
-
-    total_input_tokens = 0
-    total_output_tokens = 0
-
-    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
-        # Track tokens if model provides them
-        if hasattr(agent.model, "last_input_token_count"):
-            total_input_tokens += agent.model.last_input_token_count
-            total_output_tokens += agent.model.last_output_token_count
-            if isinstance(step_log, ActionStep):
-                step_log.input_token_count = agent.model.last_input_token_count
-                step_log.output_token_count = agent.model.last_output_token_count
-
-        for message in pull_messages_from_step(
-            step_log,
-        ):
-            yield message
-
-    final_answer = step_log  # Last log is the run's final_answer
-    final_answer = handle_agent_output_types(final_answer)
-
-    if isinstance(final_answer, AgentText):
-        yield gr.ChatMessage(
-            role="assistant",
-            content=f"**Final answer:**\n{final_answer.to_string()}\n",
-        )
-    elif isinstance(final_answer, AgentImage):
-        yield gr.ChatMessage(
-            role="assistant",
-            content={"path": final_answer.to_string(), "mime_type": "image/png"},
-        )
-    elif isinstance(final_answer, AgentAudio):
-        yield gr.ChatMessage(
-            role="assistant",
-            content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
-        )
-    else:
-        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
-
-
-class GradioUI:
-    """A one-line interface to launch your agent in Gradio"""
-
-    def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
-        if not _is_package_available("gradio"):
-            raise ModuleNotFoundError(
-                "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
-            )
-        self.agent = agent
-        self.file_upload_folder = file_upload_folder
-        if self.file_upload_folder is not None:
-            if not os.path.exists(file_upload_folder):
-                os.mkdir(file_upload_folder)
-
-    def interact_with_agent(self, prompt, messages):
-        import gradio as gr
-
-        messages.append(gr.ChatMessage(role="user", content=prompt))
-        yield messages
-        for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
-            messages.append(msg)
-            yield messages
-        yield messages
-
-    def upload_file(
-        self,
-        file,
-        file_uploads_log,
-        allowed_file_types=[
-            "application/pdf",
-            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
-            "text/plain",
-        ],
-    ):
-        """
-        Handle file uploads, default allowed types are .pdf, .docx, and .txt
-        """
-        import gradio as gr
-
-        if file is None:
-            return gr.Textbox("No file uploaded", visible=True), file_uploads_log
-
-        try:
-            mime_type, _ = mimetypes.guess_type(file.name)
-        except Exception as e:
-            return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
-
-        if mime_type not in allowed_file_types:
-            return gr.Textbox("File type disallowed", visible=True), file_uploads_log
-
-        # Sanitize file name
-        original_name = os.path.basename(file.name)
-        sanitized_name = re.sub(
-            r"[^\w\-.]", "_", original_name
-        )  # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores
-
-        type_to_ext = {}
-        for ext, t in mimetypes.types_map.items():
-            if t not in type_to_ext:
-                type_to_ext[t] = ext
-
-        # Ensure the extension correlates to the mime type
-        sanitized_name = sanitized_name.split(".")[:-1]
-        sanitized_name.append("" + type_to_ext[mime_type])
-        sanitized_name = "".join(sanitized_name)
-
-        # Save the uploaded file to the specified folder
-        file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
-        shutil.copy(file.name, file_path)
-
-        return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
-
-    def log_user_message(self, text_input, file_uploads_log):
-        return (
-            text_input
-            + (
-                f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
-                if len(file_uploads_log) > 0
-                else ""
-            ),
-            "",
-        )
-
-    def launch(self, **kwargs):
-        import gradio as gr
-        examples = [
-            {
-                "text": "Calculate the VaR for returns: 0.1, -0.2, 0.05, -0.15, 0.3",  # Message to populate
-                "display_text": "Example 1: Calculate VaR",  # Text to display in the example box
-                # Optional icon
-            },
-            {
-                "text": "Create a study plan for FRM Part 1.",  # Message to populate
-                "display_text": "Example 2: Create a study plan for FRM Part 1.",  # Text to display in the example box
-                # Optional icon
-            },
-            {
-                "text": "Give me a practice question on bond valuation.",  # Message to populate
-                "display_text": "Example 3: Give me a practice question on bond valuation.",  # Text to display in the example box
-            },
-        ]
-        with gr.Blocks(fill_height=True) as demo:
-            stored_messages = gr.State([])
-            file_uploads_log = gr.State([])
-            chatbot = gr.Chatbot(
-                label="Agent",
-                type="messages",
-                avatar_images=(
-                    None,
-                    "info/panda.png",
-                ),
-                resizeable=True,
-                scale=1,
-                # Description
-                # examples=examples,  # Example inputs
-            )
-            # If an upload folder is provided, enable the upload feature
-            if self.file_upload_folder is not None:
-                upload_file = gr.File(label="Upload a file")
-                upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
-                upload_file.change(
-                    self.upload_file,
-                    [upload_file, file_uploads_log],
-                    [upload_status, file_uploads_log],
-                )
-            text_input = gr.Textbox(lines=1, label="Chat Message")
-            text_input.submit(
-                self.log_user_message,
-                [text_input, file_uploads_log],
-                [stored_messages, text_input],
-            ).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
-
-            demo.launch(debug=True, share=True, **kwargs)
-
-
-__all__ = ["stream_to_gradio", "GradioUI"]
+import gradio as gr
+
+import app
+
+
+# Define the chatbot function
+def chatbot_response(user_input, history):
+    history = history or []
+    response = app.chatbot_response(user_input, history)  # Call the chatbot logic in app.py
+    history.append((user_input, response))
+    return history, ""  # the empty string clears the input textbox
+
+# Create the Gradio interface
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot(label="Chat with Me")
+    msg = gr.Textbox(label="Your Message")
+    clear = gr.Button("Clear")
+
+    msg.submit(chatbot_response, [msg, chatbot], [chatbot, msg])
+    clear.click(lambda: None, None, chatbot, queue=False)
+
+# Launch the interface
+
+#demo = gr.Interface(fn=chatbot_response, inputs="text", outputs="text", title="My Personal Chatbot", description="Ask me anything about myself!")
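The rewritten Gradio_UI.py above no longer defines the GradioUI class that AIapp.py imports; it is now a thin Blocks UI that hands each message to app.chatbot_response and uses a second output to clear the textbox. A self-contained sketch of the same wiring, with a hypothetical respond stub standing in for the app module and an explicit demo.launch() that the new module itself stops short of calling:

import gradio as gr

def respond(user_input):
    # Stub in place of app.chatbot_response_json; simply echoes the input.
    return f"You said: {user_input}"

def chatbot_response(user_input, history):
    history = history or []
    history.append((user_input, respond(user_input)))
    return history, ""  # the empty string resets the textbox

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Chat with Me")
    msg = gr.Textbox(label="Your Message")
    clear = gr.Button("Clear")

    # Two outputs: the updated history for the chatbot, "" for the textbox.
    msg.submit(chatbot_response, [msg, chatbot], [chatbot, msg])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()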
HugginFaceUI.py (ADDED)
@@ -0,0 +1,20 @@
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import gradio as gr
+
+checkpoint = "HuggingFaceTB/SmolLM2-135M-Instruct"
+device = "cpu"  # "cuda" or "cpu"
+tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
+
+def predict(message, history):
+    history.append({"role": "user", "content": message})
+    input_text = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=True)  # appends the assistant header the split below expects
+    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
+    outputs = model.generate(inputs, max_new_tokens=100, temperature=0.2, top_p=0.9, do_sample=True)
+    decoded = tokenizer.decode(outputs[0])
+    response = decoded.split("<|im_start|>assistant\n")[-1].split("<|im_end|>")[0]
+    return response
+
+demo = gr.ChatInterface(predict, type="messages")
+
+demo.launch()
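Because gr.ChatInterface(type="messages") passes history as a list of {"role": ..., "content": ...} dicts, predict can be sanity-checked without the UI. A quick test, assuming the definitions above; the first call downloads SmolLM2-135M, and sampled output will vary:

# predict appends the user turn to the list in place, so the same
# history object can be reused across turns.
history = []
print(predict("Hello, who are you?", history))
print(predict("What can you do?", history))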
__pycache__/Gradio_UI.cpython-312.pyc (CHANGED)
Binary files a/__pycache__/Gradio_UI.cpython-312.pyc and b/__pycache__/Gradio_UI.cpython-312.pyc differ

__pycache__/app.cpython-312.pyc (ADDED)
Binary file (1.88 kB)
app.py (CHANGED)
@@ -1,4 +1,7 @@
 import json
+import gradio as gr
+from smolagents import CodeAgent
+import json
 import random
 from smolagents import TransformersModel
 from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
@@ -13,7 +16,25 @@ from tools.visit_webpage import VisitWebpageTool
 from tools.web_search import DuckDuckGoSearchTool
 from typing import Optional, Tuple
 
-
+model_id = 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
+model = HfApiModel(
+    max_tokens=2096,
+    temperature=0.5,
+    # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
+    model_id=model_id,
+    # it is possible that this model may be overloaded
+    custom_role_conversions=None,
+)
+
+# Import tool from Hub
+# image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+
+with open("prompts.yaml", 'r') as stream:
+    prompt_templates = yaml.safe_load(stream)
+
+final_answer = FinalAnswerTool()
+visit_webpage = VisitWebpageTool()
+web_search = DuckDuckGoSearchTool()
 
 @tool
 def provide_my_information(query: str) -> str:
@@ -58,32 +79,9 @@ def provide_my_information(query: str) -> str:
         return "I'm sorry, I don't have information on that. Please ask about my name, location, occupation, education, skills, hobbies, or contact details."
 
 
-final_answer = FinalAnswerTool()
-visit_webpage = VisitWebpageTool()
-web_search = DuckDuckGoSearchTool()
-
-# If the agent does not answer, the model is overloaded; use another model or the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
-model_id = 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
-# model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct", max_tokens=1025)
-model = HfApiModel(
-    max_tokens=2096,
-    temperature=0.5,
-    # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
-    model_id=model_id,
-    # it is possible that this model may be overloaded
-    custom_role_conversions=None,
-)
-
-
-# Import tool from Hub
-# image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
-
-with open("prompts.yaml", 'r') as stream:
-    prompt_templates = yaml.safe_load(stream)
-
 agent = CodeAgent(
     model=model,
-    tools=[final_answer, provide_my_information],
+    tools=[final_answer, provide_my_information],  # add your tools here (don't remove final_answer)
     max_steps=1,
     verbosity_level=1,
     grammar=None,
@@ -94,4 +92,50 @@ agent = CodeAgent(
 )
 
 
-
+def chatbot_response_json(user_input):
+    my_info = None
+    user_input = user_input.lower()
+    with open("info/info.json", 'r') as file:
+        my_info = json.load(file)
+
+    if "who" in user_input or "about" in user_input or "introduce" in user_input or "presentation" in user_input:
+        return f"{my_info['introduction']}"
+    elif "name" in user_input:
+        return f"My name is {my_info['name']}."
+    elif "hello" in user_input:
+        return f"Hello, my name is {my_info['name']}."
+    elif "bye" in user_input:
+        return "Bye."
+    elif "location" in user_input:
+        return f"I am located in {my_info['location']}."
+    elif "occupation" in user_input or "job" in user_input or "work" in user_input:
+        return f"I work as a {my_info['occupation']}."
+    elif "education" in user_input or "educational" in user_input:
+        return f"I have a {my_info['education']}."
+    elif "skills" in user_input or "what can you do" in user_input:
+        return f"My skills include: {', '.join(my_info['skills'])}."
+    elif "hobbies" in user_input or "interests" in user_input:
+        return f"My hobbies are: {', '.join(my_info['hobbies'])}."
+    elif "contact" in user_input or "email" in user_input or "linkedin" in user_input or "github" in user_input or "cv" in user_input or "resume" in user_input:
+        contact_info = my_info["contact"]
+        return (
+            f"You can contact me via email at {contact_info['email']}, "
+            f"connect with me on LinkedIn at {contact_info['linkedin']}, "
+            f"check out my GitHub profile at {contact_info['github']}, "
+            f"or visit my website at {contact_info['website']}."
+        )
+    else:
+        return agent.run(user_input)
+
+
+def chatbot_response(user_input, history):
+    history = history or []
+    response = chatbot_response_json(user_input)  # Call the chatbot logic
+    #history.append((user_input, response))
+    return response
+
+
+# Create the Gradio interface
+demo = gr.ChatInterface(fn=chatbot_response, title="My Personal Chatbot", description="Ask me anything about myself!")
+
+demo.launch()
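chatbot_response_json answers keyword matches straight from info/info.json and only falls back to agent.run, a remote model call, when nothing matches, so the lookup branches can be exercised offline. A small hedged check, assuming the definitions in app.py above and a populated info/info.json:

# Keyword branches never touch the model; only the final else calls agent.run.
assert chatbot_response_json("What is your name?").startswith("My name is")
assert chatbot_response_json("Where is your location?").startswith("I am located in")
assert "email" in chatbot_response_json("How can I contact you?")
# An unmatched query would go to the CodeAgent (network call), e.g.:
# chatbot_response_json("What is the square root of 2?")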
info/info.json (CHANGED)
@@ -11,7 +11,7 @@
     "github": "https://liuti-ma.github.io/CV/",
     "website": "https://www.liutianqing.com"
   },
-  "introduction": "My name is Tianqing Liu, and I am an experienced Product Manager with over 10 years of expertise in financial technology, risk management, and SaaS solutions. Currently based in Paris, France, I hold several professional certifications, including FRM® (Financial Risk Manager), SAFe® 6 Product Owner/Product Manager, and SCR® (Sustainability and Climate Risk).",
+  "introduction": "Thank you for the opportunity to introduce myself. My name is Tianqing Liu, and I am an experienced Product Manager with over 10 years of expertise in financial technology, risk management, and SaaS solutions. Currently based in Paris, France, I hold several professional certifications, including FRM® (Financial Risk Manager), SAFe® 6 Product Owner/Product Manager, and SCR® (Sustainability and Climate Risk). Throughout my career, I have worked with global financial institutions such as Nedbank, OCBC, and Rabobank, delivering high-impact solutions that streamline regulatory compliance, enhance operational efficiency, and improve customer satisfaction. My experience spans across Agile product development, risk analysis, and implementing scalable cloud-based platforms on AWS. At Moody's Analytics, where I have been working since 2015, I have successfully managed SaaS integrations, developed BI reporting tools, and contributed to the evolution of Moody’s Banking Cloud Platform. Prior to that, I worked as a Software Engineer at DXC Technology, where I developed financial solutions for major clients like Crédit Agricole and BPI-France. I hold a Master’s degree in Business Informatics from Paris Dauphine University and a Bachelor of Science in Computer Science from Pantheon-Sorbonne University. My technical skills include proficiency in programming languages like Java, PL/SQL, and Python, as well as expertise in data analytics, cloud frameworks, and financial instruments. In addition to my technical and professional expertise, I am fluent in Chinese, French, and English, with conversational skills in Spanish. I am passionate about leveraging technology to solve complex problems and drive innovation in the financial sector. I look forward to discussing how my background and skills align with the goals of your team.",
   "career": "Throughout my career, I have worked with global financial institutions such as Nedbank, OCBC, and Rabobank, delivering high-impact solutions that streamline regulatory compliance, enhance operational efficiency, and improve customer satisfaction. My experience spans across Agile product development, risk analysis, and implementing scalable cloud-based platforms on AWS.\n\nAt Moody's Analytics, where I have been working since 2015, I have successfully managed SaaS integrations, developed BI reporting tools, and contributed to the evolution of Moody’s Banking Cloud Platform. Prior to that, I worked as a Software Engineer at DXC Technology, where I developed financial solutions for major clients like Crédit Agricole and BPI-France.",
   "language": "In addition to my technical and professional expertise, I am fluent in Chinese, French, and English, with conversational skills in Spanish. I am passionate about leveraging technology to solve complex problems and drive innovation in the financial sector."