Commit 67a3f70 · Parent(s): 368277b
fix: Update chat message handling
src/interfaces/gradio_interface.py CHANGED
```diff
@@ -1,5 +1,4 @@
 import gradio as gr
-from gradio import ChatMessage
 from typing import Generator, List
 import json
 import os
```
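The removed import reflects the core change of this commit: chat messages become plain dicts instead of `gradio.ChatMessage` objects. With a `gr.Chatbot` in messages mode, role/content dicts are accepted directly. A minimal sketch, assuming a Gradio version where `type="messages"` is available:

```python
import gradio as gr

# Plain dicts in OpenAI-style {"role": ..., "content": ...} format work
# directly once the chatbot is created with type="messages"; no
# gradio.ChatMessage import is needed.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        type="messages",
        value=[
            {"role": "user", "content": "What's new in AI today?"},
            {"role": "assistant", "content": "Let me check the latest feeds..."},
        ],
    )

if __name__ == "__main__":
    demo.launch()
```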
```diff
@@ -271,23 +270,41 @@ In the fast-moving world of AI and technology, staying informed while managing i
     return about_tab
 
 
-def chat_with_agent(message: str, history: List[dict]) -> Generator[List[dict], None, None]:
+def chat_with_agent(message: str, history: List) -> Generator[List, None, None]:
     """
     Chat with the agent using custom streaming functionality for real-time thinking display
     """
     try:
+        # Convert history to proper format if needed
+        if history is None:
+            history = []
+
+        # Ensure all history items are properly formatted
+        formatted_history = []
+        for item in history:
+            if isinstance(item, dict):
+                # Already a dict, check if it has required keys
+                if "role" in item and "content" in item:
+                    formatted_history.append(item)
+            else:
+                # Try to convert from ChatMessage format
+                if hasattr(item, "role") and hasattr(item, "content"):
+                    formatted_history.append({"role": item.role, "content": item.content})
+                elif hasattr(item, "role") and hasattr(item, "content"):
+                    # ChatMessage object
+                    formatted_history.append({"role": item.role, "content": item.content})
+
         # Reset memory for long conversations to prevent token overflow
-        reset_memory = len(history) > 10  # Reset after 5 user-assistant exchanges
+        reset_memory = len(formatted_history) > 10  # Reset after 5 user-assistant exchanges
 
         # Start with user message in history
-        new_history = history.copy()
+        new_history = formatted_history.copy()
 
         # Show initial thinking message
-        thinking_message = ChatMessage(
-            role="assistant",
-            content="🧠 **Agent Planning**\n\nAnalyzing your request and creating execution plan...",
-            metadata=...,
-        )
+        thinking_message = {
+            "role": "assistant",
+            "content": "🧠 **Agent Planning**\n\nAnalyzing your request and creating execution plan...",
+        }
         new_history.append(thinking_message)
         yield new_history
 
```
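To illustrate what the new normalization produces, here is a self-contained sketch of the same logic, condensed; `LegacyMessage` is a hypothetical stand-in for an attribute-style message object such as `gradio.ChatMessage`:

```python
from dataclasses import dataclass

@dataclass
class LegacyMessage:
    # Hypothetical stand-in for an attribute-style message object
    role: str
    content: str

history = [
    {"role": "user", "content": "Summarize today's AI news"},      # already a dict
    LegacyMessage(role="assistant", content="Working on it..."),   # needs conversion
]

formatted_history = []
for item in history:
    if isinstance(item, dict):
        if "role" in item and "content" in item:
            formatted_history.append(item)
    elif hasattr(item, "role") and hasattr(item, "content"):
        formatted_history.append({"role": item.role, "content": item.content})

print(formatted_history)
# [{'role': 'user', 'content': "Summarize today's AI news"},
#  {'role': 'assistant', 'content': 'Working on it...'}]
```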
```diff
@@ -321,8 +338,7 @@ def chat_with_agent(message: str, history: List[dict]) -> Generator[List[dict],
                         obs_text += "..."
                     step_content += f"👁️ **Observation:** {obs_text}\n\n"
 
-                thinking_message.content = step_content
-                thinking_message.metadata = {"title": f"🤖 Agent Step {step.step_number}", "status": "processing"}
+                thinking_message["content"] = step_content
                 new_history[-1] = thinking_message
                 yield new_history
 
```
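This hunk relies on Gradio's streaming contract: a generator handler yields the full history list each time, and mutating the trailing dict before re-yielding updates the last chat bubble in place. A minimal sketch of that pattern (names hypothetical):

```python
from typing import Generator, List

def stream_steps(history: List[dict]) -> Generator[List[dict], None, None]:
    # Append one "thinking" bubble, then grow its content step by step.
    thinking = {"role": "assistant", "content": ""}
    history = history + [thinking]
    for step_number, thought in enumerate(["plan", "search", "answer"], start=1):
        thinking["content"] += f"Step {step_number}: {thought}\n"
        yield history  # each yield re-renders the chatbot with the updated list

for snapshot in stream_steps([{"role": "user", "content": "go"}]):
    print(repr(snapshot[-1]["content"]))
```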
```diff
@@ -330,8 +346,7 @@ def chat_with_agent(message: str, history: List[dict]) -> Generator[List[dict],
             # If streaming fails, fall back to regular execution
             print(f"Streaming failed: {stream_error}, falling back to regular execution")
 
-            thinking_message.content = "🧠 **Agent Working**\n\nProcessing your request using available tools..."
-            thinking_message.metadata = {"title": "🤖 Agent Thinking", "status": "processing"}
+            thinking_message["content"] = "🧠 **Agent Working**\n\nProcessing your request using available tools..."
             new_history[-1] = thinking_message
             yield new_history
 
```
```diff
@@ -385,34 +400,28 @@ def chat_with_agent(message: str, history: List[dict]) -> Generator[List[dict],
                 tool_usage_content = "Agent executed actions successfully"
 
             # Update thinking to show completion
-            thinking_message.content = (
+            thinking_message["content"] = (
                 "🧠 **Agent Complete**\n\n✅ Request processed successfully\n✅ Response prepared"
             )
-            thinking_message.metadata = {"title": "🤖 Agent Thinking", "status": "done"}
             new_history[-1] = thinking_message
             yield new_history
 
             # Add tool usage message if there were tools used
             if tool_usage_content:
-                tool_message = ChatMessage(
-                    role="assistant",
-                    content=f"🛠️ **Tools & Actions Used**\n\n{tool_usage_content}",
-                    metadata={"title": "🛠️ Agent Tools", "status": "done"},
-                )
+                tool_message = {"role": "assistant", "content": f"🛠️ **Tools & Actions Used**\n\n{tool_usage_content}"}
                 new_history.append(tool_message)
                 yield new_history
 
             # Add final response
             final_response = str(result) if result else "I couldn't process your request."
-            final_message = ChatMessage(role="assistant", content=final_response)
+            final_message = {"role": "assistant", "content": final_response}
             new_history.append(final_message)
             yield new_history
             return
 
         # If we get here, streaming worked, so get the final result
         # The streaming should have shown all the steps, now get final answer
-        thinking_message.content = "🧠 **Agent Complete**\n\n✅ All steps executed\n✅ Preparing final response"
-        thinking_message.metadata = {"title": "🤖 Agent Thinking", "status": "done"}
+        thinking_message["content"] = "🧠 **Agent Complete**\n\n✅ All steps executed\n✅ Preparing final response"
        new_history[-1] = thinking_message
         yield new_history
 
```
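One visible side effect of the dict conversion in this hunk: the `metadata` titles that the removed `ChatMessage` calls carried (the collapsible "Agent Thinking" / "Agent Tools" panels) are dropped. If a recent Gradio release is available, messages-format dicts can carry the same metadata as a key; a hedged sketch of what that would look like, not part of this commit:

```python
# Not part of this commit: in recent Gradio releases, messages-format dicts
# may also carry a "metadata" key, preserving the collapsible tool panel
# that the ChatMessage version rendered.
tool_message = {
    "role": "assistant",
    "content": "🛠️ **Tools & Actions Used**\n\n...",
    "metadata": {"title": "🛠️ Agent Tools", "status": "done"},
}
```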
```diff
@@ -424,19 +433,18 @@ def chat_with_agent(message: str, history: List[dict]) -> Generator[List[dict],
         if hasattr(last_step, "observations") and last_step.observations:
             final_response = str(last_step.observations)
 
-        final_message = ChatMessage(role="assistant", content=final_response)
+        final_message = {"role": "assistant", "content": final_response}
         new_history.append(final_message)
         yield new_history
 
     except Exception as e:
         # Fallback error handling
-        error_message = ChatMessage(
-            role="assistant",
-            content=f"❌ **System Error:** {str(e)}\n\nPlease try again with a different approach.",
-            metadata=...,
-        )
-        history.append(error_message)
-        yield history
+        error_message = {
+            "role": "assistant",
+            "content": f"❌ **System Error:** {str(e)}\n\nPlease try again with a different approach.",
+        }
+        new_history.append(error_message)
+        yield new_history
 
 
 # Create the main chat interface
```
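For context, a handler with this shape is typically wired to the chatbot as in the sketch below; the stub stands in for the real agent call, and the component names are hypothetical:

```python
import gradio as gr

def chat_with_agent(message, history):
    # Stub standing in for the real agent: one thinking frame, then a reply.
    history = (history or []) + [{"role": "user", "content": message}]
    yield history + [{"role": "assistant", "content": "🧠 **Agent Planning**\n\nAnalyzing..."}]
    yield history + [{"role": "assistant", "content": f"Echo: {message}"}]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(placeholder="Ask the agent...")
    # Generator handlers stream: each yielded list becomes the chatbot value.
    msg.submit(chat_with_agent, inputs=[msg, chatbot], outputs=[chatbot])

if __name__ == "__main__":
    demo.launch()
```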
|