Spaces:
Runtime error
Runtime error
якась відносно робоча версія
Browse files- .gitignore +47 -0
- .gradio/certificate.pem +31 -0
- Gradio_UI.py +119 -282
- agent.json +11 -23
- agent.py +196 -0
- app.py +80 -56
- healthcare_prompts.yaml +49 -0
- list_models.py +29 -0
- test_curl.sh +17 -0
- tools/__init__.py +0 -0
- tools/final_answer.py +59 -4
- tools/healthcare_llm_visualizer.py +80 -0
- tools/web_search.py +102 -9
.gitignore
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Environment variables
|
| 2 |
+
.env
|
| 3 |
+
|
| 4 |
+
# Python
|
| 5 |
+
__pycache__/
|
| 6 |
+
*.py[cod]
|
| 7 |
+
*$py.class
|
| 8 |
+
*.so
|
| 9 |
+
.Python
|
| 10 |
+
env/
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
*.egg-info/
|
| 24 |
+
.installed.cfg
|
| 25 |
+
*.egg
|
| 26 |
+
|
| 27 |
+
# Virtual Environment
|
| 28 |
+
venv/
|
| 29 |
+
ENV/
|
| 30 |
+
|
| 31 |
+
# Logs
|
| 32 |
+
*.log
|
| 33 |
+
logs/
|
| 34 |
+
|
| 35 |
+
# Upload and results directories
|
| 36 |
+
uploads/
|
| 37 |
+
results/
|
| 38 |
+
|
| 39 |
+
# IDE specific files
|
| 40 |
+
.idea/
|
| 41 |
+
.vscode/
|
| 42 |
+
*.swp
|
| 43 |
+
*.swo
|
| 44 |
+
|
| 45 |
+
# OS specific files
|
| 46 |
+
.DS_Store
|
| 47 |
+
Thumbs.db
|
.gradio/certificate.pem
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN CERTIFICATE-----
|
| 2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
| 3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
| 4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
| 5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
| 6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
| 7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
| 8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
| 9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
| 10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
| 11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
| 12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
| 13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
| 14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
| 15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
| 16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
| 17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
| 18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
| 19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
| 20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
| 21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
| 22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
| 23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
| 24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
| 25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
| 26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
| 27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
| 28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
| 29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
| 30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
| 31 |
+
-----END CERTIFICATE-----
|
Gradio_UI.py
CHANGED
|
@@ -1,296 +1,133 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
#
|
| 5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
-
# you may not use this file except in compliance with the License.
|
| 7 |
-
# You may obtain a copy of the License at
|
| 8 |
-
#
|
| 9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
-
#
|
| 11 |
-
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
-
# See the License for the specific language governing permissions and
|
| 15 |
-
# limitations under the License.
|
| 16 |
-
import mimetypes
|
| 17 |
import os
|
| 18 |
-
import re
|
| 19 |
-
import shutil
|
| 20 |
-
from typing import Optional
|
| 21 |
|
| 22 |
-
|
| 23 |
-
from smolagents.agents import ActionStep, MultiStepAgent
|
| 24 |
-
from smolagents.memory import MemoryStep
|
| 25 |
-
from smolagents.utils import _is_package_available
|
| 26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
used_code = first_tool_call.name == "python_interpreter"
|
| 54 |
-
parent_id = f"call_{len(step_log.tool_calls)}"
|
| 55 |
-
|
| 56 |
-
# Tool call becomes the parent message with timing info
|
| 57 |
-
# First we will handle arguments based on type
|
| 58 |
-
args = first_tool_call.arguments
|
| 59 |
-
if isinstance(args, dict):
|
| 60 |
-
content = str(args.get("answer", str(args)))
|
| 61 |
-
else:
|
| 62 |
-
content = str(args).strip()
|
| 63 |
-
|
| 64 |
-
if used_code:
|
| 65 |
-
# Clean up the content by removing any end code tags
|
| 66 |
-
content = re.sub(r"```.*?\n", "", content) # Remove existing code blocks
|
| 67 |
-
content = re.sub(r"\s*<end_code>\s*", "", content) # Remove end_code tags
|
| 68 |
-
content = content.strip()
|
| 69 |
-
if not content.startswith("```python"):
|
| 70 |
-
content = f"```python\n{content}\n```"
|
| 71 |
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
)
|
| 81 |
-
yield parent_message_tool
|
| 82 |
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
)
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
# Calculate duration and token information
|
| 112 |
-
step_footnote = f"{step_number}"
|
| 113 |
-
if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
|
| 114 |
-
token_str = (
|
| 115 |
-
f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
|
| 116 |
)
|
| 117 |
-
step_footnote += token_str
|
| 118 |
-
if hasattr(step_log, "duration"):
|
| 119 |
-
step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
|
| 120 |
-
step_footnote += step_duration
|
| 121 |
-
step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
|
| 122 |
-
yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
|
| 123 |
-
yield gr.ChatMessage(role="assistant", content="-----")
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
def stream_to_gradio(
|
| 127 |
-
agent,
|
| 128 |
-
task: str,
|
| 129 |
-
reset_agent_memory: bool = False,
|
| 130 |
-
additional_args: Optional[dict] = None,
|
| 131 |
-
):
|
| 132 |
-
"""Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
|
| 133 |
-
if not _is_package_available("gradio"):
|
| 134 |
-
raise ModuleNotFoundError(
|
| 135 |
-
"Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
|
| 136 |
-
)
|
| 137 |
-
import gradio as gr
|
| 138 |
-
|
| 139 |
-
total_input_tokens = 0
|
| 140 |
-
total_output_tokens = 0
|
| 141 |
-
|
| 142 |
-
for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
|
| 143 |
-
# Track tokens if model provides them
|
| 144 |
-
if hasattr(agent.model, "last_input_token_count"):
|
| 145 |
-
total_input_tokens += agent.model.last_input_token_count
|
| 146 |
-
total_output_tokens += agent.model.last_output_token_count
|
| 147 |
-
if isinstance(step_log, ActionStep):
|
| 148 |
-
step_log.input_token_count = agent.model.last_input_token_count
|
| 149 |
-
step_log.output_token_count = agent.model.last_output_token_count
|
| 150 |
-
|
| 151 |
-
for message in pull_messages_from_step(
|
| 152 |
-
step_log,
|
| 153 |
-
):
|
| 154 |
-
yield message
|
| 155 |
-
|
| 156 |
-
final_answer = step_log # Last log is the run's final_answer
|
| 157 |
-
final_answer = handle_agent_output_types(final_answer)
|
| 158 |
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
)
|
| 169 |
-
elif isinstance(final_answer, AgentAudio):
|
| 170 |
-
yield gr.ChatMessage(
|
| 171 |
-
role="assistant",
|
| 172 |
-
content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
|
| 173 |
-
)
|
| 174 |
-
else:
|
| 175 |
-
yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
class GradioUI:
|
| 179 |
-
"""A one-line interface to launch your agent in Gradio"""
|
| 180 |
-
|
| 181 |
-
def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
|
| 182 |
-
if not _is_package_available("gradio"):
|
| 183 |
-
raise ModuleNotFoundError(
|
| 184 |
-
"Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
|
| 185 |
)
|
| 186 |
-
self.agent = agent
|
| 187 |
-
self.file_upload_folder = file_upload_folder
|
| 188 |
-
if self.file_upload_folder is not None:
|
| 189 |
-
if not os.path.exists(file_upload_folder):
|
| 190 |
-
os.mkdir(file_upload_folder)
|
| 191 |
-
|
| 192 |
-
def interact_with_agent(self, prompt, messages):
|
| 193 |
-
import gradio as gr
|
| 194 |
-
|
| 195 |
-
messages.append(gr.ChatMessage(role="user", content=prompt))
|
| 196 |
-
yield messages
|
| 197 |
-
for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
|
| 198 |
-
messages.append(msg)
|
| 199 |
-
yield messages
|
| 200 |
-
yield messages
|
| 201 |
-
|
| 202 |
-
def upload_file(
|
| 203 |
-
self,
|
| 204 |
-
file,
|
| 205 |
-
file_uploads_log,
|
| 206 |
-
allowed_file_types=[
|
| 207 |
-
"application/pdf",
|
| 208 |
-
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
| 209 |
-
"text/plain",
|
| 210 |
-
],
|
| 211 |
-
):
|
| 212 |
-
"""
|
| 213 |
-
Handle file uploads, default allowed types are .pdf, .docx, and .txt
|
| 214 |
-
"""
|
| 215 |
-
import gradio as gr
|
| 216 |
-
|
| 217 |
-
if file is None:
|
| 218 |
-
return gr.Textbox("No file uploaded", visible=True), file_uploads_log
|
| 219 |
-
|
| 220 |
-
try:
|
| 221 |
-
mime_type, _ = mimetypes.guess_type(file.name)
|
| 222 |
-
except Exception as e:
|
| 223 |
-
return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
|
| 224 |
-
|
| 225 |
-
if mime_type not in allowed_file_types:
|
| 226 |
-
return gr.Textbox("File type disallowed", visible=True), file_uploads_log
|
| 227 |
|
| 228 |
-
|
| 229 |
-
original_name = os.path.basename(file.name)
|
| 230 |
-
sanitized_name = re.sub(
|
| 231 |
-
r"[^\w\-.]", "_", original_name
|
| 232 |
-
) # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores
|
| 233 |
-
|
| 234 |
-
type_to_ext = {}
|
| 235 |
-
for ext, t in mimetypes.types_map.items():
|
| 236 |
-
if t not in type_to_ext:
|
| 237 |
-
type_to_ext[t] = ext
|
| 238 |
-
|
| 239 |
-
# Ensure the extension correlates to the mime type
|
| 240 |
-
sanitized_name = sanitized_name.split(".")[:-1]
|
| 241 |
-
sanitized_name.append("" + type_to_ext[mime_type])
|
| 242 |
-
sanitized_name = "".join(sanitized_name)
|
| 243 |
-
|
| 244 |
-
# Save the uploaded file to the specified folder
|
| 245 |
-
file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
|
| 246 |
-
shutil.copy(file.name, file_path)
|
| 247 |
-
|
| 248 |
-
return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
|
| 249 |
-
|
| 250 |
-
def log_user_message(self, text_input, file_uploads_log):
|
| 251 |
-
return (
|
| 252 |
-
text_input
|
| 253 |
-
+ (
|
| 254 |
-
f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
|
| 255 |
-
if len(file_uploads_log) > 0
|
| 256 |
-
else ""
|
| 257 |
-
),
|
| 258 |
-
"",
|
| 259 |
-
)
|
| 260 |
|
| 261 |
def launch(self, **kwargs):
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
None,
|
| 272 |
-
"https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
|
| 273 |
-
),
|
| 274 |
-
resizeable=True,
|
| 275 |
-
scale=1,
|
| 276 |
-
)
|
| 277 |
-
# If an upload folder is provided, enable the upload feature
|
| 278 |
-
if self.file_upload_folder is not None:
|
| 279 |
-
upload_file = gr.File(label="Upload a file")
|
| 280 |
-
upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
|
| 281 |
-
upload_file.change(
|
| 282 |
-
self.upload_file,
|
| 283 |
-
[upload_file, file_uploads_log],
|
| 284 |
-
[upload_status, file_uploads_log],
|
| 285 |
-
)
|
| 286 |
-
text_input = gr.Textbox(lines=1, label="Chat Message")
|
| 287 |
-
text_input.submit(
|
| 288 |
-
self.log_user_message,
|
| 289 |
-
[text_input, file_uploads_log],
|
| 290 |
-
[stored_messages, text_input],
|
| 291 |
-
).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
|
| 292 |
-
|
| 293 |
-
demo.launch(debug=True, share=True, **kwargs)
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
__all__ = ["stream_to_gradio", "GradioUI"]
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import logging
|
| 3 |
+
from pathlib import Path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
import os
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
+
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
+
class GradioUI:
|
| 9 |
+
def __init__(self, agent, file_upload_folder='./uploads'):
|
| 10 |
+
self.agent = agent
|
| 11 |
+
self.file_upload_folder = Path(file_upload_folder)
|
| 12 |
+
self.file_upload_folder.mkdir(exist_ok=True)
|
| 13 |
+
|
| 14 |
+
def build_interface(self):
|
| 15 |
+
with gr.Blocks(theme=gr.themes.Soft()) as interface:
|
| 16 |
+
with gr.Row():
|
| 17 |
+
chatbot = gr.Chatbot(
|
| 18 |
+
label="Research Assistant",
|
| 19 |
+
height=600,
|
| 20 |
+
show_copy_button=True
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
# Hidden by default file upload section
|
| 24 |
+
with gr.Row(visible=False) as file_upload_row:
|
| 25 |
+
upload_file = gr.File(
|
| 26 |
+
label="Upload File",
|
| 27 |
+
file_types=[".csv", ".xlsx", ".txt", ".pdf", ".doc", ".docx"]
|
| 28 |
+
)
|
| 29 |
+
upload_status = gr.Textbox(
|
| 30 |
+
label="Upload Status",
|
| 31 |
+
interactive=False,
|
| 32 |
+
visible=False
|
| 33 |
+
)
|
| 34 |
|
| 35 |
+
with gr.Row():
|
| 36 |
+
text_input = gr.Textbox(
|
| 37 |
+
label="Enter your research query",
|
| 38 |
+
placeholder="Enter your query here...",
|
| 39 |
+
lines=2
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
with gr.Row():
|
| 43 |
+
submit_btn = gr.Button("Submit", variant="primary")
|
| 44 |
+
clear_btn = gr.Button("Clear")
|
| 45 |
+
toggle_upload_btn = gr.Button("Toggle File Upload")
|
| 46 |
+
|
| 47 |
+
# Store conversation state
|
| 48 |
+
state = gr.State([])
|
| 49 |
+
file_history = gr.State([])
|
| 50 |
+
|
| 51 |
+
# Event handlers
|
| 52 |
+
def toggle_upload(visible):
|
| 53 |
+
return not visible
|
| 54 |
+
|
| 55 |
+
toggle_upload_btn.click(
|
| 56 |
+
fn=toggle_upload,
|
| 57 |
+
inputs=[file_upload_row],
|
| 58 |
+
outputs=[file_upload_row]
|
| 59 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
+
def process_upload(file, history):
|
| 62 |
+
if file:
|
| 63 |
+
try:
|
| 64 |
+
file_path = self.file_upload_folder / file.name
|
| 65 |
+
# Save file
|
| 66 |
+
with open(file_path, 'wb') as f:
|
| 67 |
+
f.write(file.read())
|
| 68 |
+
history.append(str(file_path))
|
| 69 |
+
return gr.update(value=f"File uploaded: {file.name}"), history
|
| 70 |
+
except Exception as e:
|
| 71 |
+
logger.error(f"Upload error: {e}")
|
| 72 |
+
return gr.update(value=f"Upload failed: {str(e)}"), history
|
| 73 |
+
return gr.update(value="No file selected"), history
|
| 74 |
+
|
| 75 |
+
upload_file.change(
|
| 76 |
+
fn=process_upload,
|
| 77 |
+
inputs=[upload_file, file_history],
|
| 78 |
+
outputs=[upload_status, file_history]
|
| 79 |
)
|
|
|
|
| 80 |
|
| 81 |
+
def user_message(message, chat_history, files):
|
| 82 |
+
if files:
|
| 83 |
+
message += f"\nAvailable files for analysis: {', '.join(files)}"
|
| 84 |
+
chat_history.append((message, None))
|
| 85 |
+
return "", chat_history
|
| 86 |
+
|
| 87 |
+
def bot_response(chat_history, files):
|
| 88 |
+
try:
|
| 89 |
+
response = self.agent.process_query(
|
| 90 |
+
chat_history[-1][0],
|
| 91 |
+
available_files=files if files else None
|
| 92 |
)
|
| 93 |
+
chat_history[-1] = (chat_history[-1][0], response)
|
| 94 |
+
return chat_history
|
| 95 |
+
except Exception as e:
|
| 96 |
+
logger.error(f"Error in bot response: {e}")
|
| 97 |
+
chat_history[-1] = (chat_history[-1][0], f"Error: {str(e)}")
|
| 98 |
+
return chat_history
|
| 99 |
+
|
| 100 |
+
# Submit handling
|
| 101 |
+
submit_btn.click(
|
| 102 |
+
user_message,
|
| 103 |
+
[text_input, state, file_history],
|
| 104 |
+
[text_input, chatbot]
|
| 105 |
+
).then(
|
| 106 |
+
bot_response,
|
| 107 |
+
[chatbot, file_history],
|
| 108 |
+
[chatbot]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 109 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
|
| 111 |
+
# Clear handling
|
| 112 |
+
def clear_chat():
|
| 113 |
+
return [], []
|
| 114 |
+
|
| 115 |
+
clear_btn.click(
|
| 116 |
+
clear_chat,
|
| 117 |
+
None,
|
| 118 |
+
[chatbot, file_history],
|
| 119 |
+
queue=False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 120 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 121 |
|
| 122 |
+
return interface
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 123 |
|
| 124 |
def launch(self, **kwargs):
|
| 125 |
+
interface = self.build_interface()
|
| 126 |
+
interface.launch(**kwargs)
|
| 127 |
+
|
| 128 |
+
if __name__ == "__main__":
|
| 129 |
+
# Example usage
|
| 130 |
+
from agent import ResearchAgent
|
| 131 |
+
agent = ResearchAgent()
|
| 132 |
+
ui = GradioUI(agent)
|
| 133 |
+
ui.launch(share=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
agent.json
CHANGED
|
@@ -2,51 +2,39 @@
|
|
| 2 |
"tools": [
|
| 3 |
"web_search",
|
| 4 |
"visit_webpage",
|
| 5 |
-
"final_answer"
|
|
|
|
| 6 |
],
|
| 7 |
"model": {
|
| 8 |
"class": "HfApiModel",
|
| 9 |
"data": {
|
| 10 |
"max_tokens": 2096,
|
| 11 |
-
"temperature": 0.
|
| 12 |
"last_input_token_count": null,
|
| 13 |
"last_output_token_count": null,
|
| 14 |
-
"model_id": "
|
| 15 |
"custom_role_conversions": null
|
| 16 |
}
|
| 17 |
},
|
| 18 |
"prompt_templates": {
|
| 19 |
-
"system_prompt": "You are an expert assistant
|
| 20 |
"planning": {
|
| 21 |
-
"initial_facts": "Below I will present you a task.\n\nYou will now build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need.\nTo do so, you will have to read the task and identify things that must be discovered in order to successfully complete it.\nDon't make any assumptions. For each item, provide a thorough reasoning. Here is how you will structure this survey:\n\n---\n### 1. Facts given in the task\nList here the specific facts given in the task that could help you (there might be nothing here).\n\n### 2. Facts to look up\nList here any facts that we may need to look up.\nAlso list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here.\n\n### 3. Facts to derive\nList here anything that we want to derive from the above by logical reasoning, for instance computation or simulation.\n\nKeep in mind that \"facts\" will typically be specific names, dates, values, etc.
|
| 22 |
-
"initial_plan": "You are a world expert at
|
| 23 |
-
"update_facts_pre_messages": "You are
|
| 24 |
-
"update_facts_post_messages": "
|
| 25 |
-
"update_plan_pre_messages": "
|
| 26 |
-
"update_plan_post_messages": "
|
| 27 |
-
},
|
| 28 |
-
"managed_agent": {
|
| 29 |
-
"task": "You're a helpful agent named '{{name}}'.\nYou have been submitted this task by your manager.\n---\nTask:\n{{task}}\n---\nYou're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible to give them a clear understanding of the answer.\n\nYour final_answer WILL HAVE to contain these parts:\n### 1. Task outcome (short version):\n### 2. Task outcome (extremely detailed version):\n### 3. Additional context (if relevant):\n\nPut all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost.\nAnd even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.",
|
| 30 |
-
"report": "Here is the final answer from your managed agent '{{name}}':\n{{final_answer}}"
|
| 31 |
}
|
| 32 |
},
|
| 33 |
"max_steps": 6,
|
| 34 |
"verbosity_level": 1,
|
| 35 |
-
"grammar": null,
|
| 36 |
-
"planning_interval": null,
|
| 37 |
-
"name": null,
|
| 38 |
-
"description": null,
|
| 39 |
"authorized_imports": [
|
| 40 |
"unicodedata",
|
| 41 |
-
"stat",
|
| 42 |
"datetime",
|
| 43 |
-
"random",
|
| 44 |
"pandas",
|
| 45 |
-
"itertools",
|
| 46 |
"math",
|
| 47 |
"statistics",
|
| 48 |
-
"queue",
|
| 49 |
-
"time",
|
| 50 |
"collections",
|
| 51 |
"re"
|
| 52 |
]
|
|
|
|
| 2 |
"tools": [
|
| 3 |
"web_search",
|
| 4 |
"visit_webpage",
|
| 5 |
+
"final_answer",
|
| 6 |
+
"healthcare_llm_visualizer"
|
| 7 |
],
|
| 8 |
"model": {
|
| 9 |
"class": "HfApiModel",
|
| 10 |
"data": {
|
| 11 |
"max_tokens": 2096,
|
| 12 |
+
"temperature": 0.3,
|
| 13 |
"last_input_token_count": null,
|
| 14 |
"last_output_token_count": null,
|
| 15 |
+
"model_id": "mistralai/Mistral-7B-Instruct-v0.2",
|
| 16 |
"custom_role_conversions": null
|
| 17 |
}
|
| 18 |
},
|
| 19 |
"prompt_templates": {
|
| 20 |
+
"system_prompt": "You are an expert research assistant specializing in web search and scientific report writing. Your primary functions are conducting comprehensive web searches and creating well-structured scientific reports.\n\nYour DEFAULT WORKFLOW includes:\n1. Understanding the user query\n2. Performing thorough web searches\n3. Analyzing and synthesizing information\n4. Creating structured scientific reports\n5. Providing proper citations\n\nWhen writing reports, you MUST follow this structure:\n- Executive Summary\n- Introduction\n- Methodology\n- Findings\n- Discussion\n- Conclusion\n- References\n\nYou have access to these tools:\n- web_search: Your PRIMARY tool for information gathering\n- visit_webpage: For detailed analysis of specific pages\n- healthcare_llm_visualizer: For healthcare data visualization (only when explicitly requested)\n- final_answer: For delivering your report\n\nIMPORTANT RULES:\n1. Always start with web_search unless specifically instructed otherwise\n2. Only use other tools when explicitly requested by the user\n3. Maintain academic writing standards\n4. Always cite your sources\n5. Present balanced viewpoints\n6. Acknowledge limitations in your research\n\nTo solve tasks, proceed in cycles of:\n'Thought:' - explain your reasoning\n'Code:' - execute search or requested tool\n'Observation:' - analyze results\n\nEnd with a final_answer containing your structured report.",
|
| 21 |
"planning": {
|
| 22 |
+
"initial_facts": "Below I will present you a task.\n\nYou will now build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need.\nTo do so, you will have to read the task and identify things that must be discovered in order to successfully complete it.\nDon't make any assumptions. For each item, provide a thorough reasoning. Here is how you will structure this survey:\n\n---\n### 1. Facts given in the task\nList here the specific facts given in the task that could help you (there might be nothing here).\n\n### 2. Facts to look up\nList here any facts that we may need to look up.\nAlso list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here.\n\n### 3. Facts to derive\nList here anything that we want to derive from the above by logical reasoning, for instance computation or simulation.\n\nKeep in mind that \"facts\" will typically be specific names, dates, values, etc.",
|
| 23 |
+
"initial_plan": "You are a world expert at planning scientific research using web searches and creating academic reports.\n\nFor the given task, develop a step-by-step high-level plan that focuses on information gathering and report creation.\nPrioritize web searches and only include other tools when explicitly requested.\nAfter writing the final step of the plan, write the '\\n<end_plan>' tag and stop there.",
|
| 24 |
+
"update_facts_pre_messages": "You are updating the facts based on your research progress.\nMaintain academic rigor in fact classification and verification.",
|
| 25 |
+
"update_facts_post_messages": "Based on your research progress, update your facts under these headings:\n### 1. Facts given in the task\n### 2. Facts that we have learned\n### 3. Facts still to look up\n### 4. Facts still to derive",
|
| 26 |
+
"update_plan_pre_messages": "Review your research progress and plan next steps.",
|
| 27 |
+
"update_plan_post_messages": "Update your research plan based on findings so far.\nMaintain focus on web search and academic reporting unless other tools were explicitly requested."
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
}
|
| 29 |
},
|
| 30 |
"max_steps": 6,
|
| 31 |
"verbosity_level": 1,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
"authorized_imports": [
|
| 33 |
"unicodedata",
|
|
|
|
| 34 |
"datetime",
|
|
|
|
| 35 |
"pandas",
|
|
|
|
| 36 |
"math",
|
| 37 |
"statistics",
|
|
|
|
|
|
|
| 38 |
"collections",
|
| 39 |
"re"
|
| 40 |
]
|
agent.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from smolagents import CodeAgent
|
| 2 |
+
import logging
|
| 3 |
+
from typing import Optional, List, Dict, Any
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
|
| 6 |
+
logger = logging.getLogger(__name__)
|
| 7 |
+
|
| 8 |
+
class ResearchAgent(CodeAgent):
|
| 9 |
+
"""
|
| 10 |
+
Research-focused agent for scientific literature search and analysis.
|
| 11 |
+
Inherits from CodeAgent and specializes in academic research tasks.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
def __init__(self, model, tools, **kwargs):
|
| 15 |
+
"""
|
| 16 |
+
Initialize the research agent.
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
model: The language model to use
|
| 20 |
+
tools: List of available tools
|
| 21 |
+
**kwargs: Additional arguments passed to CodeAgent
|
| 22 |
+
"""
|
| 23 |
+
super().__init__(model=model, tools=tools, **kwargs)
|
| 24 |
+
self.available_tools = {tool.name: tool for tool in tools}
|
| 25 |
+
logger.info(f"ResearchAgent initialized with tools: {list(self.available_tools.keys())}")
|
| 26 |
+
|
| 27 |
+
def format_research_report(self, content: Dict[str, Any]) -> str:
|
| 28 |
+
"""
|
| 29 |
+
Format research results into a structured report.
|
| 30 |
+
|
| 31 |
+
Args:
|
| 32 |
+
content (Dict[str, Any]): Research content to format
|
| 33 |
+
|
| 34 |
+
Returns:
|
| 35 |
+
str: Formatted research report
|
| 36 |
+
"""
|
| 37 |
+
try:
|
| 38 |
+
# Default sections for research report
|
| 39 |
+
sections = [
|
| 40 |
+
"Executive Summary",
|
| 41 |
+
"Introduction",
|
| 42 |
+
"Methodology",
|
| 43 |
+
"Findings",
|
| 44 |
+
"Discussion",
|
| 45 |
+
"Conclusion",
|
| 46 |
+
"References"
|
| 47 |
+
]
|
| 48 |
+
|
| 49 |
+
# Create report header
|
| 50 |
+
report = [
|
| 51 |
+
"# Науковий звіт",
|
| 52 |
+
f"*Згенеровано: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*\n"
|
| 53 |
+
]
|
| 54 |
+
|
| 55 |
+
# Add each section
|
| 56 |
+
for section in sections:
|
| 57 |
+
section_content = content.get(section, f"Розділ {section} не надано")
|
| 58 |
+
report.extend([
|
| 59 |
+
f"## {section}",
|
| 60 |
+
section_content,
|
| 61 |
+
"" # Empty line for readability
|
| 62 |
+
])
|
| 63 |
+
|
| 64 |
+
return "\n".join(report)
|
| 65 |
+
|
| 66 |
+
except Exception as e:
|
| 67 |
+
logger.error(f"Error formatting research report: {e}")
|
| 68 |
+
return str(content) # Return raw content if formatting fails
|
| 69 |
+
|
| 70 |
+
def prepare_query_context(self, query: str, available_files: Optional[List[str]] = None) -> str:
|
| 71 |
+
"""
|
| 72 |
+
Prepare the context for the research query.
|
| 73 |
+
|
| 74 |
+
Args:
|
| 75 |
+
query (str): Original research query
|
| 76 |
+
available_files (Optional[List[str]]): List of available files
|
| 77 |
+
|
| 78 |
+
Returns:
|
| 79 |
+
str: Prepared query context
|
| 80 |
+
"""
|
| 81 |
+
context_parts = [
|
| 82 |
+
query,
|
| 83 |
+
"\nІнструкції для виконання:",
|
| 84 |
+
"1. Використовуйте web_search для пошуку актуальної наукової інформації",
|
| 85 |
+
"2. Аналізуйте знайдені джерела та підсумовуйте ключові висновки",
|
| 86 |
+
"3. Формуйте структурований звіт з усіма необхідними розділами",
|
| 87 |
+
"4. Обов'язково вказуйте посилання на використані джерела"
|
| 88 |
+
]
|
| 89 |
+
|
| 90 |
+
if available_files:
|
| 91 |
+
context_parts.append(f"\nДоступні файли для аналізу: {', '.join(available_files)}")
|
| 92 |
+
|
| 93 |
+
return "\n".join(context_parts)
|
| 94 |
+
|
| 95 |
+
def validate_search_results(self, results: str) -> bool:
    """Heuristically check that search output looks usable.

    Args:
        results (str): Raw search results text.

    Returns:
        bool: True when the text is non-trivial in length and contains
            none of the common error markers.
    """
    # Too short to be a meaningful result set.
    if not results or len(results.strip()) < 100:
        return False

    lowered = results.lower()
    # Substrings that typically signal a failed or empty search.
    for marker in ("no results found", "error", "failed", "unauthorized", "invalid"):
        if marker in lowered:
            return False
    return True
|
| 118 |
+
|
| 119 |
+
def process_query(self, query: str, available_files: Optional[List[str]] = None) -> str:
    """Run a research query end to end and return a formatted report.

    Args:
        query (str): The research query to process.
        available_files (Optional[List[str]]): Paths of files the agent may use.

    Returns:
        str: Formatted research results, or a user-facing error message
            (in Ukrainian) when the run fails or yields nothing.
    """
    try:
        logger.info(f"Processing research query: {query}")

        task = self.prepare_query_context(query, available_files)

        # Non-streaming run with a fresh agent state for every query.
        outcome = self.run(
            task=task,
            stream=False,
            reset=True
        )

        if not outcome:
            return "Не вдалося отримати результати. Будь ласка, спробуйте переформулювати запит."
        if isinstance(outcome, str):
            return outcome
        if isinstance(outcome, dict):
            # Structured result: render it as a report.
            return self.format_research_report(outcome)
        return str(outcome)

    except Exception as e:
        error_msg = f"Помилка при обробці запиту: {str(e)}"
        logger.error(error_msg)
        return error_msg
|
| 162 |
+
|
| 163 |
+
def add_tool(self, tool) -> None:
    """Register *tool* in both the name lookup table and the tool list.

    Args:
        tool: Tool instance to add; must expose a ``name`` attribute.

    Raises:
        Exception: Re-raises any failure after logging it.
    """
    try:
        tool_name = tool.name
        self.available_tools[tool_name] = tool
        self.tools.append(tool)
        logger.info(f"Added new tool: {tool_name}")
    except Exception as e:
        logger.error(f"Error adding tool: {e}")
        raise
|
| 177 |
+
|
| 178 |
+
def remove_tool(self, tool_name: str) -> None:
    """Deregister a tool by name; a no-op when the name is unknown.

    Args:
        tool_name (str): Name of the tool to remove.

    Raises:
        Exception: Re-raises any failure after logging it.
    """
    try:
        removed = self.available_tools.pop(tool_name, None)
        if removed is not None:
            self.tools.remove(removed)
            logger.info(f"Removed tool: {tool_name}")
    except Exception as e:
        logger.error(f"Error removing tool: {e}")
        raise
|
| 193 |
+
|
| 194 |
+
def __str__(self) -> str:
    """Human-readable summary listing the registered tool names."""
    tool_names = list(self.available_tools.keys())
    return f"ResearchAgent(tools={tool_names})"
|
app.py
CHANGED
|
@@ -1,65 +1,89 @@
|
|
| 1 |
-
from smolagents import
|
| 2 |
-
import
|
| 3 |
-
import requests
|
| 4 |
-
import pytz
|
| 5 |
-
import yaml
|
| 6 |
from tools.final_answer import FinalAnswerTool
|
| 7 |
-
|
| 8 |
from Gradio_UI import GradioUI
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
|
| 10 |
-
#
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
"""
|
| 19 |
-
return "What magic will you build ?"
|
| 20 |
-
|
| 21 |
-
@tool
|
| 22 |
-
def get_current_time_in_timezone(timezone: str) -> str:
|
| 23 |
-
"""A tool that fetches the current local time in a specified timezone.
|
| 24 |
-
Args:
|
| 25 |
-
timezone: A string representing a valid timezone (e.g., 'America/New_York').
|
| 26 |
-
"""
|
| 27 |
-
try:
|
| 28 |
-
# Create timezone object
|
| 29 |
-
tz = pytz.timezone(timezone)
|
| 30 |
-
# Get current time in that timezone
|
| 31 |
-
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
|
| 32 |
-
return f"The current local time in {timezone} is: {local_time}"
|
| 33 |
-
except Exception as e:
|
| 34 |
-
return f"Error fetching time for timezone '{timezone}': {str(e)}"
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
final_answer = FinalAnswerTool()
|
| 38 |
-
model = HfApiModel(
|
| 39 |
-
max_tokens=2096,
|
| 40 |
-
temperature=0.5,
|
| 41 |
-
model_id='https://wxknx1kg971u7k1n.us-east-1.aws.endpoints.huggingface.cloud',# it is possible that this model may be overloaded
|
| 42 |
-
custom_role_conversions=None,
|
| 43 |
)
|
|
|
|
| 44 |
|
|
|
|
|
|
|
| 45 |
|
| 46 |
-
|
| 47 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
|
| 65 |
-
|
|
|
|
|
|
| 1 |
+
from smolagents import HfApiModel
|
| 2 |
+
from tools.web_search import DuckDuckGoSearchTool
|
|
|
|
|
|
|
|
|
|
| 3 |
from tools.final_answer import FinalAnswerTool
|
| 4 |
+
from tools.healthcare_llm_visualizer import HealthcareLLMVisualizerTool
|
| 5 |
from Gradio_UI import GradioUI
|
| 6 |
+
from agent import ResearchAgent
|
| 7 |
+
import os
|
| 8 |
+
from dotenv import load_dotenv
|
| 9 |
+
import logging
|
| 10 |
|
| 11 |
+
# Configure logging
|
| 12 |
+
logging.basicConfig(
|
| 13 |
+
level=logging.INFO,
|
| 14 |
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
| 15 |
+
handlers=[
|
| 16 |
+
logging.FileHandler('research_agent.log'),
|
| 17 |
+
logging.StreamHandler()
|
| 18 |
+
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
)
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
|
| 22 |
+
# Load environment variables
|
| 23 |
+
load_dotenv()
|
| 24 |
|
| 25 |
+
def initialize_tools():
    """Build the agent's toolkit (web search, final answer, visualizer).

    Reads MAX_SEARCH_RESULTS from the environment (default 10).

    Returns:
        list: Configured tool instances.

    Raises:
        Exception: Re-raises any failure during tool construction.
    """
    logger.info("Initializing tools...")
    try:
        search = DuckDuckGoSearchTool(
            max_results=int(os.getenv('MAX_SEARCH_RESULTS', 10))
        )
        toolkit = [search, FinalAnswerTool(), HealthcareLLMVisualizerTool()]
        logger.info("Tools initialized successfully")
        return toolkit
    except Exception as e:
        logger.error(f"Error initializing tools: {e}")
        raise
|
| 41 |
|
| 42 |
+
def initialize_model():
    """Create the HfApiModel client from environment configuration.

    Environment variables used: MODEL_ID, HF_API_TOKEN, TEMPERATURE.

    Returns:
        HfApiModel: Ready-to-use language model client.

    Raises:
        Exception: Re-raises any failure after logging it.
    """
    logger.info("Initializing language model...")
    try:
        chosen_id = os.getenv('MODEL_ID', "mistralai/Mistral-7B-Instruct-v0.2")
        model = HfApiModel(
            model_id=chosen_id,
            token=os.getenv('HF_API_TOKEN'),
            temperature=float(os.getenv('TEMPERATURE', 0.3)),
        )
        logger.info(f"Model initialized: {model.model_id}")
        return model
    except Exception as e:
        logger.error(f"Error initializing model: {e}")
        raise
|
| 56 |
|
| 57 |
+
def main():
    """Application entry point: wire up tools, model and agent, then launch the UI.

    All Gradio launch options are environment-driven (DEBUG_MODE,
    SHARE_GRADIO, GRADIO_SERVER_NAME, GRADIO_SERVER_PORT).

    Raises:
        Exception: Re-raises any startup failure after logging it.
    """
    try:
        logger.info("Starting Research Agent application...")

        toolkit = initialize_tools()
        llm = initialize_model()

        research_agent = ResearchAgent(
            model=llm,
            tools=toolkit,
            max_steps=int(os.getenv('MAX_STEPS', 6)),
            verbosity_level=int(os.getenv('VERBOSITY_LEVEL', 1)),
        )

        logger.info("Launching Gradio interface...")
        interface = GradioUI(research_agent)
        interface.launch(
            debug=os.getenv('DEBUG_MODE', 'False').lower() == 'true',
            share=os.getenv('SHARE_GRADIO', 'True').lower() == 'true',
            server_name=os.getenv('GRADIO_SERVER_NAME', '0.0.0.0'),
            server_port=int(os.getenv('GRADIO_SERVER_PORT', 7860)),
        )

    except Exception as e:
        logger.error(f"Application startup failed: {e}")
        raise
|
| 87 |
|
| 88 |
+
# Script entry point: start the research-agent Gradio application.
if __name__ == "__main__":
    main()
|
healthcare_prompts.yaml
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from smolagents.tools import Tool
|
| 2 |
+
|
| 3 |
+
class HealthcareLLMVisualizerTool(Tool):
    """Tool that renders healthcare-LLM category data as a React bar chart."""

    name = "healthcare_llm_visualizer"
    description = "Creates interactive visualizations for analyzing LLM applications in Healthcare"
    inputs = {
        'data': {
            'type': 'object',
            'description': 'Data for visualization in format: {"items": [{"category": "name", "value": number}]}'
        }
    }
    output_type = "string"

    def forward(self, data):
        """Create React (recharts) source code visualizing *data*.

        Args:
            data: Mapping shaped like {"items": [{"category": str, "value": number}]}.

        Returns:
            str: JSX component source, or an error message string on failure.
        """
        import json  # local import keeps the module's import surface unchanged
        try:
            # Create React component
            chart_code = """
import React from 'react';
import { BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip, Legend, ResponsiveContainer } from 'recharts';

const HealthcareLLMChart = () => {
  const data = DATA_PLACEHOLDER;

  return (
    <div className="w-full max-w-4xl mx-auto p-4">
      <h2 className="text-2xl font-bold mb-4">LLM Applications in Healthcare</h2>
      <div className="h-96">
        <ResponsiveContainer width="100%" height="100%">
          <BarChart data={data.items}>
            <CartesianGrid strokeDasharray="3 3" />
            <XAxis dataKey="category" />
            <YAxis />
            <Tooltip />
            <Legend />
            <Bar dataKey="value" fill="#8884d8" />
          </BarChart>
        </ResponsiveContainer>
      </div>
    </div>
  );
};

export default HealthcareLLMChart;
""".replace('DATA_PLACEHOLDER', json.dumps(data))
            # json.dumps (not str()) so the embedded literal is valid JavaScript:
            # Python repr would emit single quotes, True/False/None, and leave
            # embedded quotes unescaped.
            return chart_code
        except Exception as e:
            return f"Error creating visualization: {str(e)}"
|
list_models.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from huggingface_hub import HfApi
|
| 2 |
+
import os
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
|
| 5 |
+
def list_available_models():
    """Print the ten most-downloaded text-generation models on the Hugging Face Hub.

    Loads HF_API_TOKEN from the environment (.env) before querying the Hub.
    """
    load_dotenv()

    hub = HfApi(token=os.getenv('HF_API_TOKEN'))

    # Text-generation models only, sorted by downloads descending.
    top_models = hub.list_models(
        filter=["text-generation"],
        sort="downloads",
        direction=-1,
        limit=10,
    )

    print("\nTop 10 Available Text Generation Models:")
    print("-" * 50)
    for entry in top_models:
        print(f"\nModel ID: {entry.modelId}")
        print(f"Downloads: {entry.downloads:,}")
        print(f"Likes: {entry.likes}")
        print(f"Pipeline Tag: {entry.pipeline_tag}")
        print("-" * 30)
|
| 27 |
+
|
| 28 |
+
# Script entry point: print the model listing to stdout.
if __name__ == "__main__":
    list_available_models()
|
test_curl.sh
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Smoke-test the model endpoint with a single text-generation request.

# Load environment variables (expects MODEL_ENDPOINT and HF_API_TOKEN)
source .env

# Make a curl request to the endpoint.
# The URL expansion is quoted to prevent word splitting and globbing.
curl -X POST \
  "$MODEL_ENDPOINT" \
  -H "Authorization: Bearer $HF_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "inputs": "Hello! Can you hear me?",
    "parameters": {
        "max_new_tokens": 50,
        "temperature": 0.7
    }
}'
|
tools/__init__.py
ADDED
|
File without changes
|
tools/final_answer.py
CHANGED
|
@@ -1,14 +1,69 @@
|
|
| 1 |
-
from typing import Any,
|
| 2 |
from smolagents.tools import Tool
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
class FinalAnswerTool(Tool):
|
| 5 |
name = "final_answer"
|
| 6 |
-
description = "
|
| 7 |
-
inputs = {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
output_type = "any"
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
def forward(self, answer: Any) -> Any:
|
| 11 |
-
return answer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
def __init__(self, *args, **kwargs):
|
| 14 |
self.is_initialized = False
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, Union
|
| 2 |
from smolagents.tools import Tool
|
| 3 |
+
import logging
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
|
| 6 |
+
logger = logging.getLogger(__name__)
|
| 7 |
|
| 8 |
class FinalAnswerTool(Tool):
    """Tool that formats the agent's final output as a markdown research report."""

    name = "final_answer"
    description = "Formats and returns the final research report"
    inputs = {
        'answer': {
            'type': 'any',
            'description': 'The final research report content'
        }
    }
    output_type = "any"

    # Report sections, emitted in this fixed order.
    REQUIRED_SECTIONS = (
        "Executive Summary",
        "Introduction",
        "Methodology",
        "Findings",
        "Discussion",
        "Conclusion",
        "References",
    )

    def __init__(self, *args, **kwargs):
        # Flag consumed elsewhere in the project; preserved as-is.
        self.is_initialized = False
        super().__init__(*args, **kwargs)

    def _format_report(self, content: Union[str, Dict]) -> str:
        """Format *content* as a markdown research report.

        Strings pass through untouched. Dicts are rendered section by
        section with a placeholder for any missing section. Unlike a
        naive implementation, the caller's dict is NOT mutated.

        Args:
            content (Union[str, Dict]): Raw answer or per-section mapping.

        Returns:
            str: Markdown report text.
        """
        if isinstance(content, str):
            return content

        report = [
            "# Research Report",
            f"*Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*\n",
        ]
        for section in self.REQUIRED_SECTIONS:
            body = content.get(section, f"{section} section was not provided")
            report.extend([
                f"## {section}",
                body,
                ""  # Empty line for better readability
            ])
        return "\n".join(report)

    def forward(self, answer: Any) -> Any:
        """Process and return the final answer; on failure return the error text."""
        logger.info("Formatting final research report")
        try:
            formatted_report = self._format_report(answer)
            logger.info("Research report formatted successfully")
            return formatted_report
        except Exception as e:
            error_msg = f"Error formatting research report: {str(e)}"
            logger.error(error_msg)
            return error_msg
|
tools/healthcare_llm_visualizer.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, List, Any
|
| 2 |
+
from smolagents.tools import Tool
|
| 3 |
+
|
| 4 |
+
class HealthcareLLMVisualizerTool(Tool):
    """Tool that aggregates healthcare-LLM research data and renders a React bar chart."""

    name = "healthcare_llm_visualizer"
    description = "Creates interactive visualizations for analyzing LLM applications in Healthcare"
    inputs = {
        'data': {
            'type': 'object',
            'description': 'Data for visualization in format: {"items": [{"category": "name", "value": number}]}'
        }
    }
    output_type = "string"

    def prepare_data(self, raw_data: Dict) -> Dict[str, Any]:
        """Aggregate per-category counts across all known result groups.

        Counts how often each category appears in the 'trends',
        'implementations' and 'success_cases' lists of *raw_data*.
        (Return annotation fixed: this returns a dict, not a list.)

        Args:
            raw_data (Dict): Mapping with optional list-valued group keys.

        Returns:
            Dict[str, Any]: {"items": [{"category": str, "value": int}, ...]}
        """
        categories: Dict[str, int] = {}
        # One parameterized pass instead of three copy-pasted loops.
        for group in ('trends', 'implementations', 'success_cases'):
            for item in raw_data.get(group, []):
                category = item['category']
                categories[category] = categories.get(category, 0) + 1

        # Convert to the shape the chart component expects.
        return {
            "items": [
                {"category": cat, "value": val}
                for cat, val in categories.items()
            ]
        }

    def forward(self, data: Dict) -> str:
        """Create React (recharts) source code visualizing *data*.

        Args:
            data (Dict): Raw research data (see :meth:`prepare_data`).

        Returns:
            str: JSX component source, or an error message string on failure.
        """
        import json  # local import keeps the module's import surface unchanged
        try:
            # Prepare data for visualization
            viz_data = self.prepare_data(data)

            # Create React component; json.dumps (not str()) so the embedded
            # literal is valid JavaScript with proper quoting/escaping.
            chart_code = """
import React from 'react';
import { BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip, Legend, ResponsiveContainer } from 'recharts';

const HealthcareLLMChart = () => {
  const data = DATA_PLACEHOLDER;

  return (
    <div className="w-full max-w-4xl mx-auto p-4">
      <h2 className="text-2xl font-bold mb-4">LLM Applications in Healthcare</h2>
      <div className="h-96">
        <ResponsiveContainer width="100%" height="100%">
          <BarChart data={data.items}>
            <CartesianGrid strokeDasharray="3 3" />
            <XAxis dataKey="category" />
            <YAxis />
            <Tooltip />
            <Legend />
            <Bar dataKey="value" fill="#8884d8" />
          </BarChart>
        </ResponsiveContainer>
      </div>
    </div>
  );
};

export default HealthcareLLMChart;
""".replace('DATA_PLACEHOLDER', json.dumps(viz_data))

            return chart_code
        except Exception as e:
            return f"Error creating visualization: {str(e)}"
|
tools/web_search.py
CHANGED
|
@@ -1,11 +1,22 @@
|
|
| 1 |
-
from typing import Any, Optional
|
| 2 |
from smolagents.tools import Tool
|
| 3 |
import duckduckgo_search
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
class DuckDuckGoSearchTool(Tool):
|
| 6 |
name = "web_search"
|
| 7 |
-
description = "Performs
|
| 8 |
-
inputs = {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
output_type = "string"
|
| 10 |
|
| 11 |
def __init__(self, max_results=10, **kwargs):
|
|
@@ -15,13 +26,95 @@ class DuckDuckGoSearchTool(Tool):
|
|
| 15 |
from duckduckgo_search import DDGS
|
| 16 |
except ImportError as e:
|
| 17 |
raise ImportError(
|
| 18 |
-
"
|
| 19 |
) from e
|
| 20 |
self.ddgs = DDGS(**kwargs)
|
| 21 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
def forward(self, query: str) -> str:
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Optional, List, Dict
|
| 2 |
from smolagents.tools import Tool
|
| 3 |
import duckduckgo_search
|
| 4 |
+
import logging
|
| 5 |
+
import re
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from collections import defaultdict
|
| 8 |
+
|
| 9 |
+
logger = logging.getLogger(__name__)
|
| 10 |
|
| 11 |
class DuckDuckGoSearchTool(Tool):
|
| 12 |
name = "web_search"
|
| 13 |
+
description = "Performs comprehensive web searches with focus on academic and scientific sources"
|
| 14 |
+
inputs = {
|
| 15 |
+
'query': {
|
| 16 |
+
'type': 'string',
|
| 17 |
+
'description': 'The search query to perform'
|
| 18 |
+
}
|
| 19 |
+
}
|
| 20 |
output_type = "string"
|
| 21 |
|
| 22 |
def __init__(self, max_results=10, **kwargs):
|
|
|
|
| 26 |
from duckduckgo_search import DDGS
|
| 27 |
except ImportError as e:
|
| 28 |
raise ImportError(
|
| 29 |
+
"Required package `duckduckgo_search` not found. Install with: pip install duckduckgo-search"
|
| 30 |
) from e
|
| 31 |
self.ddgs = DDGS(**kwargs)
|
| 32 |
|
| 33 |
+
def _extract_date(self, text: str) -> Optional[str]:
|
| 34 |
+
"""Extract publication date from text if available"""
|
| 35 |
+
try:
|
| 36 |
+
# Common date patterns
|
| 37 |
+
patterns = [
|
| 38 |
+
r'\b(20\d{2})\b', # Year pattern
|
| 39 |
+
r'\b(19\d{2})\b', # Year pattern for older papers
|
| 40 |
+
r'\b(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[a-z]* \d{1,2},? 20\d{2}\b'
|
| 41 |
+
]
|
| 42 |
+
|
| 43 |
+
for pattern in patterns:
|
| 44 |
+
match = re.search(pattern, text)
|
| 45 |
+
if match:
|
| 46 |
+
return match.group(0)
|
| 47 |
+
return None
|
| 48 |
+
except Exception as e:
|
| 49 |
+
logger.error(f"Error extracting date: {e}")
|
| 50 |
+
return None
|
| 51 |
+
|
| 52 |
+
def _parse_search_result(self, result: Dict[str, str]) -> Dict[str, Any]:
|
| 53 |
+
"""Parse a single search result safely"""
|
| 54 |
+
try:
|
| 55 |
+
title = result.get('title', '').strip()
|
| 56 |
+
url = result.get('link', '')
|
| 57 |
+
description = result.get('body', '').strip()
|
| 58 |
+
date = self._extract_date(description) or 'Date not found'
|
| 59 |
+
|
| 60 |
+
return {
|
| 61 |
+
'title': title,
|
| 62 |
+
'url': url,
|
| 63 |
+
'description': description,
|
| 64 |
+
'date': date
|
| 65 |
+
}
|
| 66 |
+
except Exception as e:
|
| 67 |
+
logger.error(f"Error parsing search result: {e}")
|
| 68 |
+
return {}
|
| 69 |
+
|
| 70 |
+
def _format_results(self, results: List[Dict[str, str]]) -> str:
|
| 71 |
+
"""Format search results with academic focus"""
|
| 72 |
+
if not results:
|
| 73 |
+
return "No results found. Consider refining your search terms."
|
| 74 |
+
|
| 75 |
+
formatted_output = ["## Search Results\n"]
|
| 76 |
+
|
| 77 |
+
for result in results:
|
| 78 |
+
parsed = self._parse_search_result(result)
|
| 79 |
+
if parsed and parsed.get('title'):
|
| 80 |
+
formatted_output.extend([
|
| 81 |
+
f"### {parsed['title']}",
|
| 82 |
+
f"**Source:** [{parsed['url']}]({parsed['url']})",
|
| 83 |
+
f"**Date:** {parsed['date']}",
|
| 84 |
+
f"**Summary:** {parsed['description']}\n"
|
| 85 |
+
])
|
| 86 |
+
|
| 87 |
+
return "\n".join(formatted_output)
|
| 88 |
+
|
| 89 |
def forward(self, query: str) -> str:
    """Run a DuckDuckGo search and return markdown-formatted results.

    The query is biased toward academic material: when it contains no
    academic keyword, " research study" is appended before searching.

    Args:
        query (str): The search query to perform.

    Returns:
        str: Formatted results, or an error / empty-results message.
    """
    logger.info(f"Performing web search for query: {query}")
    try:
        # Add academic focus to search if not present
        lowered = query.lower()
        if all(term not in lowered for term in ('research', 'study', 'journal', 'paper')):
            query = f"{query} research study"

        # Execute search with error handling
        try:
            results = list(self.ddgs.text(query, max_results=self.max_results))
            if not results:
                return "No results found. Try modifying your search terms."
        except Exception as e:
            logger.error(f"Search execution error: {e}")
            return f"Error performing search: {str(e)}"

        rendered = self._format_results(results)
        logger.info("Search completed successfully")
        return rendered

    except Exception as e:
        error_msg = f"Error during web search: {str(e)}"
        logger.error(error_msg)
        return error_msg
|
| 116 |
+
|
| 117 |
+
def __del__(self):
    """Cleanup when object is destroyed"""
    # Drop the DDGS client reference so it can be garbage-collected;
    # guarded because __del__ may run on a partially-initialized instance.
    if hasattr(self, 'ddgs'):
        self.ddgs = None
|