import os
import sys
import json
import argparse
import time
import uuid
import subprocess
import requests
from typing import Any, Dict, Iterator, List, Optional
from dotenv import load_dotenv
load_dotenv()
import gradio as gr
from gradio import ChatMessage
# Import AgentFlow modules
from agentflow.models.initializer import Initializer
from agentflow.models.planner import Planner
from agentflow.models.memory import Memory
from agentflow.models.executor import Executor
from agentflow.models.utils import make_json_serializable_truncated
from pathlib import Path
from huggingface_hub import CommitScheduler
import spaces
# Get Huggingface token from environment variable
HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
########### Test Huggingface Dataset ###########
# Update the HuggingFace dataset constants
DATASET_DIR = Path("solver_cache") # the directory to save the dataset
DATASET_DIR.mkdir(parents=True, exist_ok=True)
QUERY_ID = None  # set per request in solve_problem_gradio()
TOOL_NAME_MAPPING = {
"Generalist_Solution_Generator_Tool": "Base_Generator_Tool",
"Ground_Google_Search_Tool": "Google_Search_Tool",
"Python_Code_Generator_Tool": "Python_Coder_Tool",
"Web_RAG_Search_Tool": "Web_Search_Tool",
"Wikipedia_RAG_Search_Tool": "Wikipedia_Search_Tool"
}
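# TOOL_NAME_MAPPING translates internal tool class names (keys) into the shorter
# display names (values) shown in the UI; the planner and executor still operate
# on the internal names.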
# Enable scheduler to record data to HuggingFace dataset
# scheduler = None
scheduler = CommitScheduler(
repo_id="ZhuofengLi/AgentFlow-Gradio-Demo-User-Data",
repo_type="dataset",
folder_path=DATASET_DIR,
path_in_repo="solver_cache", # Update path in repo
token=HF_TOKEN
)
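# NOTE: CommitScheduler commits the contents of DATASET_DIR to the Hub on a
# background thread. If concurrent writes during a commit ever become a concern,
# the huggingface_hub docs suggest guarding writes with the scheduler's lock.
# A minimal sketch (file name is illustrative only):
#
#   with scheduler.lock:
#       with (DATASET_DIR / "example.json").open("w") as f:
#           json.dump(data, f)
#
# The save_* helpers below currently write without the lock.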
########### vLLM Service Management ###########
VLLM_MODEL_NAME = "AgentFlow/agentflow-planner-7b"
VLLM_PORT = 8000
VLLM_HOST = "localhost"
VLLM_PROCESS = None
def check_vllm_service() -> bool:
    """Check if the vLLM service is running."""
    try:
        response = requests.get(f"http://{VLLM_HOST}:{VLLM_PORT}/v1/models", timeout=2)
        return response.status_code == 200
    except requests.RequestException:
        return False
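# For reference, a healthy vLLM server answers /v1/models with an OpenAI-style
# payload, roughly {"object": "list", "data": [{"id": "<model name>", ...}]}.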
def start_vllm_service() -> bool:
"""Start vLLM service in background"""
global VLLM_PROCESS
if check_vllm_service():
print(f"π’ vLLM service already running on port {VLLM_PORT}")
return True
try:
print(f"π Starting vLLM service for {VLLM_MODEL_NAME}...")
# Start vLLM server in background
VLLM_PROCESS = subprocess.Popen(
[
"vllm", "serve", VLLM_MODEL_NAME,
"--port", str(VLLM_PORT),
"--host", VLLM_HOST,
"--tensor-parallel-size", "1",
"--gpu-memory-utilization", "0.95"
],
text=True
)
        # Wait for the service to become ready (poll for up to 180 seconds)
        for _ in range(180):
            time.sleep(1)
            if check_vllm_service():
                print(f"🟢 vLLM service started successfully on port {VLLM_PORT}")
                return True
        print("⚠️ vLLM service failed to start within 180 seconds")
        return False
except Exception as e:
print(f"β Failed to start vLLM service: {e}")
return False
def stop_vllm_service():
"""Stop vLLM service if running"""
global VLLM_PROCESS
if VLLM_PROCESS:
VLLM_PROCESS.terminate()
VLLM_PROCESS.wait()
print("π vLLM service stopped")
def get_vllm_status() -> str:
    """Get a human-readable vLLM service status message."""
    if check_vllm_service():
        return f"🟢 vLLM service running on port {VLLM_PORT}"
    else:
        return "⚠️ vLLM service not running"
########### End of vLLM Service Management ###########
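# The served planner model speaks the OpenAI-compatible API, so a manual smoke
# test can go through the standard chat-completions route. A minimal sketch
# (payload fields follow the OpenAI schema; adjust as needed):
#
#   resp = requests.post(
#       f"http://{VLLM_HOST}:{VLLM_PORT}/v1/chat/completions",
#       json={"model": VLLM_MODEL_NAME,
#             "messages": [{"role": "user", "content": "ping"}]},
#       timeout=30,
#   )
#   print(resp.json()["choices"][0]["message"]["content"])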
def save_query_data(query_id: str, query: str) -> None:
"""Save query data to dataset"""
# Save query metadata
query_cache_dir = DATASET_DIR / query_id
query_cache_dir.mkdir(parents=True, exist_ok=True)
query_file = query_cache_dir / "query_metadata.json"
query_metadata = {
"query_id": query_id,
"query_text": query,
"datetime": time.strftime("%Y%m%d_%H%M%S"),
}
print(f"Saving query metadata to {query_file}")
with query_file.open("w") as f:
json.dump(query_metadata, f, indent=4)
def save_feedback(query_id: str, feedback_type: str, feedback_text: Optional[str] = None) -> None:
"""
Save user feedback to the query directory.
Args:
query_id: Unique identifier for the query
feedback_type: Type of feedback ('upvote', 'downvote', or 'comment')
feedback_text: Optional text feedback from user
"""
feedback_data_dir = DATASET_DIR / query_id
feedback_data_dir.mkdir(parents=True, exist_ok=True)
feedback_data = {
"query_id": query_id,
"feedback_type": feedback_type,
"feedback_text": feedback_text,
"datetime": time.strftime("%Y%m%d_%H%M%S")
}
# Save feedback in the query directory
feedback_file = feedback_data_dir / "feedback.json"
print(f"Saving feedback to {feedback_file}")
# If feedback file exists, update it
if feedback_file.exists():
with feedback_file.open("r") as f:
existing_feedback = json.load(f)
# Convert to list if it's a single feedback entry
if not isinstance(existing_feedback, list):
existing_feedback = [existing_feedback]
existing_feedback.append(feedback_data)
feedback_data = existing_feedback
# Write feedback data
with feedback_file.open("w") as f:
json.dump(feedback_data, f, indent=4)
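# feedback.json holds a single dict after the first submission and a list of
# dicts once further feedback is appended, e.g. (illustrative values):
#   {"query_id": "20250217_062225_612f2474", "feedback_type": "upvote",
#    "feedback_text": null, "datetime": "20250217_062300"}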
def save_steps_data(query_id: str, memory: Memory) -> None:
"""Save steps data to Huggingface dataset"""
steps_file = DATASET_DIR / query_id / "all_steps.json"
memory_actions = memory.get_actions()
memory_actions = make_json_serializable_truncated(memory_actions) # NOTE: make the memory actions serializable
print("Memory actions: ", memory_actions)
with steps_file.open("w") as f:
json.dump(memory_actions, f, indent=4)
def save_module_data(query_id: str, key: str, value: Any) -> None:
"""Save module data to Huggingface dataset"""
try:
key = key.replace(" ", "_").lower()
module_file = DATASET_DIR / query_id / f"{key}.json"
value = make_json_serializable_truncated(value) # NOTE: make the value serializable
with module_file.open("a") as f:
json.dump(value, f, indent=4)
except Exception as e:
print(f"Warning: Failed to save as JSON: {e}")
# Fallback to saving as text file
text_file = DATASET_DIR / query_id / f"{key}.txt"
try:
with text_file.open("a") as f:
f.write(str(value) + "\n")
print(f"Successfully saved as text file: {text_file}")
except Exception as e:
print(f"Error: Failed to save as text file: {e}")
########### End of Test Huggingface Dataset ###########
class Solver:
def __init__(
self,
planner,
memory,
executor,
output_types: str = "base,final,direct",
index: int = 0,
verbose: bool = True,
max_steps: int = 10,
max_time: int = 60,
query_cache_dir: str = "solver_cache"
):
self.planner = planner
self.memory = memory
self.executor = executor
self.index = index
self.verbose = verbose
self.max_steps = max_steps
self.max_time = max_time
self.query_cache_dir = query_cache_dir
self.output_types = output_types.lower().split(',')
assert all(output_type in ["base", "final", "direct"] for output_type in self.output_types), "Invalid output type. Supported types are 'base', 'final', 'direct'."
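        # Of these, only two output types are acted on in this demo: 'direct' is
        # rendered in the chat as the final answer, 'final' is generated and saved
        # to the cache without being displayed, and 'base' is accepted but unused.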
def stream_solve_user_problem(self, user_query: str, messages: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
"""
Streams intermediate thoughts and final responses for the problem-solving process based on user input.
Args:
user_query (str): The text query input from the user.
messages (list): A list of ChatMessage objects to store the streamed responses.
"""
img_path = None # AgentFlow doesn't use images in this demo
# Set tool cache directory
_tool_cache_dir = os.path.join(self.query_cache_dir, "tool_cache") # NOTE: This is the directory for tool cache
self.executor.set_query_cache_dir(_tool_cache_dir) # NOTE: set query cache directory
# Step 1: Display the received inputs
messages.append(ChatMessage(role="assistant", content=f"### π Received Query:\n{user_query}"))
yield messages
# # Step 2: Add "thinking" status while processing
# messages.append(ChatMessage(
# role="assistant",
# content="",
# metadata={"title": "β³ Thinking: Processing input..."}
# ))
# [Step 3] Initialize problem-solving state
start_time = time.time()
step_count = 0
json_data = {"query": user_query, "image": "Image received as bytes"}
messages.append(ChatMessage(role="assistant", content="
"))
messages.append(ChatMessage(role="assistant", content="### π§ Reasoning Steps from AgentFlow (Deep Reasoning...)"))
yield messages
# [Step 4] Query Analysis
query_analysis = self.planner.analyze_query(user_query, img_path)
json_data["query_analysis"] = query_analysis # TODO: update
# Format the query analysis for display
query_analysis_display = query_analysis.replace("Concise Summary:", "**Concise Summary:**\n")
query_analysis_display = query_analysis_display.replace("Required Skills:", "**Required Skills:**")
query_analysis_display = query_analysis_display.replace("Relevant Tools:", "**Relevant Tools:**")
query_analysis_display = query_analysis_display.replace("Additional Considerations:", "**Additional Considerations:**")
# Map tool names in query analysis for display
for original_name, display_name in TOOL_NAME_MAPPING.items():
query_analysis_display = query_analysis_display.replace(original_name, display_name)
messages.append(ChatMessage(role="assistant",
content=f"{query_analysis_display}",
metadata={"title": "### π Step 0: Query Analysis"}))
yield messages
# Save the query analysis data
query_analysis_data = {
"query_analysis": query_analysis,
"time": round(time.time() - start_time, 5)
}
save_module_data(QUERY_ID, "step_0_query_analysis", query_analysis_data)
# Execution loop (similar to your step-by-step solver)
while step_count < self.max_steps and (time.time() - start_time) < self.max_time:
step_count += 1
messages.append(ChatMessage(role="AgentFlow",
content=f"Generating the {step_count}-th step...",
metadata={"title": f"π Step {step_count}"}))
yield messages
# [Step 5] Generate the next step
next_step = self.planner.generate_next_step(
user_query, img_path, query_analysis, self.memory, step_count, self.max_steps, json_data
)
context, sub_goal, tool_name = self.planner.extract_context_subgoal_and_tool(next_step) # TODO: update
step_data = {
"step_count": step_count,
"context": context,
"sub_goal": sub_goal,
"tool_name": tool_name,
"time": round(time.time() - start_time, 5)
}
save_module_data(QUERY_ID, f"step_{step_count}_action_prediction", step_data)
# Display the step information
display_tool_name = TOOL_NAME_MAPPING.get(tool_name, tool_name)
# Map tool names in context and sub_goal for display
context_display = context if context else ""
sub_goal_display = sub_goal if sub_goal else ""
for original_name, display_name in TOOL_NAME_MAPPING.items():
context_display = context_display.replace(original_name, display_name)
sub_goal_display = sub_goal_display.replace(original_name, display_name)
messages.append(ChatMessage(
role="assistant",
content=f"**Context:** {context_display}\n\n**Sub-goal:** {sub_goal_display}\n\n**Tool:** `{display_tool_name}`",
metadata={"title": f"### π― Step {step_count}: Action Prediction ({display_tool_name})"}))
yield messages
# Handle tool execution or errors
if tool_name not in self.planner.available_tools:
display_tool_name = TOOL_NAME_MAPPING.get(tool_name, tool_name)
messages.append(ChatMessage(
role="assistant",
content=f"β οΈ Error: Tool '{display_tool_name}' is not available."))
yield messages
continue
# [Step 6-7] Generate and execute the tool command
tool_command = self.executor.generate_tool_command(
user_query, img_path, context, sub_goal, tool_name, self.planner.toolbox_metadata[tool_name], step_count, json_data
)
analysis, explanation, command = self.executor.extract_explanation_and_command(tool_command)
result = self.executor.execute_tool_command(tool_name, command)
result = make_json_serializable_truncated(result)
            # Display the command generation information
display_tool_name = TOOL_NAME_MAPPING.get(tool_name, tool_name)
messages.append(ChatMessage(
role="assistant",
content=f"**Command:**\n```python\n{command}\n```",
metadata={"title": f"### π Step {step_count}: Command Generation ({display_tool_name})"}))
yield messages
# Save the command generation data
command_generation_data = {
"analysis": analysis,
"explanation": explanation,
"command": command,
"time": round(time.time() - start_time, 5)
}
save_module_data(QUERY_ID, f"step_{step_count}_command_generation", command_generation_data)
# Display the command execution result
display_tool_name = TOOL_NAME_MAPPING.get(tool_name, tool_name)
# Map tool names in result for display
result_json_str = json.dumps(result, indent=4)
for original_name, display_name in TOOL_NAME_MAPPING.items():
result_json_str = result_json_str.replace(original_name, display_name)
messages.append(ChatMessage(
role="assistant",
content=f"**Result:**\n```json\n{result_json_str}\n```",
# content=f"**Result:**\n```json\n{result}\n```",
metadata={"title": f"### β‘ Step {step_count}: Command Execution ({display_tool_name})"}))
yield messages
# Save the command execution data
command_execution_data = {
"result": result,
"time": round(time.time() - start_time, 5)
}
save_module_data(QUERY_ID, f"step_{step_count}_command_execution", command_execution_data)
# [Step 8] Memory update and stopping condition
self.memory.add_action(step_count, tool_name, sub_goal, command, result) # TODO: do not update here
stop_verification = self.planner.verificate_context(user_query, img_path, query_analysis, self.memory, step_count, json_data)
context_verification, conclusion = self.planner.extract_conclusion(stop_verification)
# Save the context verification data
context_verification_data = {
"stop_verification": context_verification,
"conclusion": conclusion,
"time": round(time.time() - start_time, 5)
}
save_module_data(QUERY_ID, f"step_{step_count}_context_verification", context_verification_data)
# Display the context verification result # TODO: update context_verification
# Map tool names in context verification for display
context_verification_display = context_verification if context_verification else ""
for original_name, display_name in TOOL_NAME_MAPPING.items():
context_verification_display = context_verification_display.replace(original_name, display_name)
conclusion_emoji = "β
" if conclusion == 'STOP' else "π"
messages.append(ChatMessage(
role="assistant",
content=f"**Analysis:**\n{context_verification_display}\n\n**Conclusion:** `{conclusion}` {conclusion_emoji}",
metadata={"title": f"### π€ Step {step_count}: Context Verification"}))
yield messages
if conclusion == 'STOP':
break
        # Step 9: Generate final output (if requested)
if 'direct' in self.output_types:
messages.append(ChatMessage(role="assistant", content="
"))
direct_output = self.planner.generate_direct_output(user_query, img_path, self.memory) # TODO: update
# Map tool names in direct output for display
direct_output_display = direct_output if direct_output else ""
for original_name, display_name in TOOL_NAME_MAPPING.items():
direct_output_display = direct_output_display.replace(original_name, display_name)
messages.append(ChatMessage(role="assistant", content=f"### π Final Answer:\n{direct_output_display}"))
yield messages
# Save the direct output data
direct_output_data = {
"direct_output": direct_output,
"time": round(time.time() - start_time, 5)
}
save_module_data(QUERY_ID, "direct_output", direct_output_data)
if 'final' in self.output_types:
final_output = self.planner.generate_final_output(user_query, img_path, self.memory) # Disabled visibility for now
# messages.append(ChatMessage(role="assistant", content=f"π― Final Output:\n{final_output}"))
# yield messages
# Save the final output data
final_output_data = {
"final_output": final_output,
"time": round(time.time() - start_time, 5)
}
save_module_data(QUERY_ID, "final_output", final_output_data)
        # Step 10: Completion message
        messages.append(ChatMessage(role="assistant", content="---"))
        messages.append(ChatMessage(role="assistant", content="### ✨ Query Solved!"))
        messages.append(ChatMessage(role="assistant", content="How do you like the output from AgentFlow 🌟💫? Please give us your feedback below. \n\n👍 If the answer is correct or the reasoning steps are helpful, please upvote the output. \n👎 If it is incorrect or the reasoning steps are not helpful, please downvote the output. \n💬 If you have any suggestions or comments, please leave them below.\n\nThank you for using AgentFlow! 🌟💫"))
yield messages
def parse_arguments():
parser = argparse.ArgumentParser(description="Run the AgentFlow demo with specified parameters.")
parser.add_argument("--llm_engine_name", default="gpt-4o", help="LLM engine name.")
parser.add_argument("--max_tokens", type=int, default=2000, help="Maximum tokens for LLM generation.")
parser.add_argument(
"--output_types",
default="base,final,direct",
help="Comma-separated list of required outputs (base,final,direct)"
)
parser.add_argument("--enabled_tools", default="Base_Generator_Tool", help="List of enabled tools.")
parser.add_argument("--root_cache_dir", default="solver_cache", help="Path to solver cache directory.")
parser.add_argument("--query_id", default=None, help="Query ID.")
parser.add_argument("--verbose", type=bool, default=True, help="Enable verbose output.")
# NOTE: Add new arguments
parser.add_argument("--run_baseline_only", type=bool, default=False, help="Run only the baseline (no toolbox).")
parser.add_argument("--openai_api_source", default="we_provided", choices=["we_provided", "user_provided"], help="Source of OpenAI API key.")
return parser.parse_args()
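# Example launch (script name is illustrative; all flags are optional given the defaults):
#   python app.py --llm_engine_name gpt-4o --max_tokens 2000 --output_types base,final,direct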
@spaces.GPU(duration=300)
def solve_problem_gradio(user_query, max_steps=10, max_time=60, llm_model_engine=None, enabled_tools=None):
"""
Wrapper function to connect the solver to Gradio.
Streams responses from `solver.stream_solve_user_problem` for real-time UI updates.
"""
# Check if query is empty
if not user_query or not user_query.strip():
yield [ChatMessage(role="assistant", content="β Error: Please enter a question before submitting.")]
return
# Generate Unique Query ID (Date and first 8 characters of UUID)
    query_id = time.strftime("%Y%m%d_%H%M%S") + "_" + str(uuid.uuid4())[:8]  # e.g., 20250217_062225_612f2474
print(f"Query ID: {query_id}")
# NOTE: update the global variable to save the query ID
global QUERY_ID
QUERY_ID = query_id
# Create a directory for the query ID
query_cache_dir = os.path.join(DATASET_DIR.name, query_id) # NOTE
os.makedirs(query_cache_dir, exist_ok=True)
    # if api_key is None:
    #     return [["assistant", "❌ Error: OpenAI API Key is required."]]
# Save the query data
save_query_data(
query_id=query_id,
query=user_query
)
# Filter out Web_Search_Tool (frontend only, not actually used)
if enabled_tools and "Web_Search_Tool" in enabled_tools:
enabled_tools = [tool for tool in enabled_tools if tool != "Web_Search_Tool"]
# Instantiate Initializer
initializer = Initializer(
enabled_tools=enabled_tools,
tool_engine=["Default"] * len(enabled_tools) if enabled_tools else ["Default"],
model_string=llm_model_engine,
verbose=False
)
# Instantiate Planner
planner = Planner(
llm_engine_name=llm_model_engine,
toolbox_metadata=initializer.toolbox_metadata,
available_tools=initializer.available_tools,
verbose=False,
temperature=0.7
)
# Instantiate Memory
memory = Memory()
# Instantiate Executor
executor = Executor(
llm_engine_name="dashscope", # AgentFlow uses dashscope for executor
root_cache_dir=query_cache_dir, # NOTE
verbose=False,
temperature=0.7,
enable_signal=False
)
# Instantiate Solver
solver = Solver(
planner=planner,
memory=memory,
executor=executor,
output_types=args.output_types, # Add new parameter
verbose=args.verbose,
max_steps=max_steps,
max_time=max_time,
query_cache_dir=query_cache_dir # NOTE
)
    if solver is None:
        yield [ChatMessage(role="assistant", content="❌ Error: Solver is not initialized. Please restart the application.")]
        return
messages = [] # Initialize message list
for message_batch in solver.stream_solve_user_problem(user_query, messages):
yield [msg for msg in message_batch] # Ensure correct format for Gradio Chatbot
# Save steps
save_steps_data(
query_id=query_id,
memory=memory
)
def main(args):
#################### Gradio Interface ####################
# with gr.Blocks() as demo:
with gr.Blocks(theme=gr.themes.Ocean()) as demo:
# Theming https://www.gradio.app/guides/theming-guide
gr.Markdown("# ππ« Chat with AgentFlow: A Trainable Agentic Framework for Complex Reasoning") # Title
gr.Markdown("""
**AgentFlow** is a **trainable, tool-integrated agentic framework** designed to overcome the scalability and generalization limits of today's tool-augmented reasoning approaches. It introduces a **modular agentic system** (π§ Planner, π Executor, β
Verifier, and βοΈ Generator) and an **in-the-flow RL algorithm (Flow-GRPO)** to optimize the agent within the system for **effective planning and tool use**.
[Website](https://agentflow.stanford.edu/) |
[HF Paper](https://huggingface.co/papers/2510.05592) |
[GitHub](https://github.com/lupantech/AgentFlow) |
[Model](https://huggingface.co/AgentFlow/agentflow-planner-7b) |
[YouTube](https://www.youtube.com/watch?v=kIQbCQIH1SI) |
[X (Twitter)](https://x.com/lupantech/status/1976016000345919803) |
[Slack](https://join.slack.com/t/agentflow-co/shared_invite/zt-3f712xngl-LfxS4gmftAeKvcxR3nSkWQ)
> β³ **Note:** The first query may take ~20 seconds to initialize AgentFlow. Subsequent queries will be super fast.
>
> π‘ **Tip:** If the wait time is too long, please try again later.
""")
with gr.Row():
# Left column for settings
with gr.Column(scale=1):
# with gr.Row():
# if args.openai_api_source == "user_provided":
# print("Using API key from user input.")
# api_key = gr.Textbox(
# show_label=True,
# placeholder="Your API key will not be stored in any way.",
# type="password",
# label="OpenAI API Key",
# # container=False
# )
# else:
# print(f"Using local API key from environment variable: ...{os.getenv('OPENAI_API_KEY')[-4:]}")
# api_key = gr.Textbox(
# value=os.getenv("OPENAI_API_KEY"),
# visible=True,
# interactive=False
# )
with gr.Row():
llm_model_engine = gr.Textbox(
value="vllm-AgentFlow/agentflow-planner-7b",
label="π§ Planner Model",
interactive=False
)
with gr.Row():
                    gr.Textbox(
                        value="Qwen2.5-7B-Instruct",
                        label="🚀 Executor, ✅ Verifier, and ✍️ Generator Model",
                        interactive=False
                    )
with gr.Row():
vllm_status = gr.Textbox(
value=get_vllm_status(),
label="vLLM Status",
interactive=False,
scale=4
)
refresh_status_btn = gr.Button("π Refresh", scale=1)
# Add click handler for refresh button
refresh_status_btn.click(
fn=get_vllm_status,
inputs=[],
outputs=vllm_status
)
with gr.Row():
max_steps = gr.Slider(value=5, minimum=1, maximum=10, step=1, label="Max Steps")
with gr.Row():
max_time = gr.Slider(value=240, minimum=60, maximum=300, step=30, label="Max Time (seconds)")
with gr.Row():
# Container for tools section
with gr.Column():
# First row for checkbox group
enabled_tools = gr.CheckboxGroup(
choices=all_tools,
value=all_tools,
label="Selected Tools",
)
# Second row for buttons
with gr.Row():
enable_all_btn = gr.Button("Select All Tools")
disable_all_btn = gr.Button("Clear All Tools")
# Add click handlers for the buttons
enable_all_btn.click(
lambda: all_tools,
outputs=enabled_tools
)
disable_all_btn.click(
lambda: [],
outputs=enabled_tools
)
with gr.Column(scale=5):
with gr.Row():
# Middle column for the query
with gr.Column(scale=2):
with gr.Row():
user_query = gr.Textbox(value="How many r letters are in the word strawberry?", placeholder="Type your question here...", label="Question (Required)", lines=3)
with gr.Row():
run_button = gr.Button("ππ« Submit and Run", variant="primary") # Run button with blue color
# Right column for the output
with gr.Column(scale=3):
chatbot_output = gr.Chatbot(type="messages", label="Step-wise Problem-Solving Output", height=500)
# TODO: Add actions to the buttons
with gr.Row(elem_id="buttons") as button_row:
upvote_btn = gr.Button(value="π Upvote", interactive=True, variant="primary")
downvote_btn = gr.Button(value="π Downvote", interactive=True, variant="primary")
# stop_btn = gr.Button(value="βοΈ Stop", interactive=True) # TODO
# clear_btn = gr.Button(value="ποΈ Clear history", interactive=True) # TODO
# TODO: Add comment textbox
with gr.Row():
comment_textbox = gr.Textbox(value="",
placeholder="Feel free to add any comments here. Thanks for using AgentFlow!",
label="π¬ Comment (Type and press Enter to submit.)", interactive=True)
# Update the button click handlers
upvote_btn.click(
fn=lambda: (save_feedback(QUERY_ID, "upvote"), gr.Info("Thank you for your upvote! π")),
inputs=[],
outputs=[]
)
downvote_btn.click(
fn=lambda: (save_feedback(QUERY_ID, "downvote"), gr.Info("Thank you for your feedback. We'll work to improve! π")),
inputs=[],
outputs=[]
)
# Add handler for comment submission
comment_textbox.submit(
fn=lambda comment: (save_feedback(QUERY_ID, "comment", comment), gr.Info("Thank you for your comment! β¨")),
inputs=[comment_textbox],
outputs=[]
)
# Bottom row for examples
with gr.Row():
with gr.Column(scale=5):
gr.Markdown("")
gr.Markdown("""
                ## 📚 Try these examples with suggested tools.
""")
gr.Examples(
examples=[
[ "General Knowledge",
"What is the capital of France?",
["Base_Generator_Tool"],
"Paris"],
[ "Logical Reasoning",
"How many r letters are in the word strawberry?",
["Base_Generator_Tool", "Python_Coder_Tool"],
"3"],
[ "Web Search",
"Who is the mother-in-law of Olivera Despina?",
["Base_Generator_Tool", "Google_Search_Tool", "Wikipedia_Search_Tool", "Web_Search_Tool"],
"GΓΌlΓ§iΓ§ek Hatun"],
[ "Agentic Search",
"The object in the British Museum's collection with a museum number of 2012,5015.17 is the shell of a particular mollusk species. According to the abstract of a research article published in Science Advances in 2021, beads made from the shells of this species were found that are at least how many thousands of years old?",
["Base_Generator_Tool", "Python_Coder_Tool", "Google_Search_Tool", "Wikipedia_Search_Tool", "Web_Search_Tool"],
"142,000"],
[ "Arithmetic Reasoning",
"Which is bigger, 9.11 or 9.9?",
["Base_Generator_Tool", "Python_Coder_Tool"],
"9.9"],
[ "Multi-step Reasoning",
"Using the numbers [1, 1, 6, 9], create an expression that equals 24. You must use basic arithmetic operations (+, -, Γ, /) and parentheses. For example, one solution for [1, 2, 3, 4] is (1+2+3)Γ4.",
["Python_Coder_Tool"],
"((1 + 1) * 9) + 6"],
["Scentific Reasoning",
"An investigator is studying cellular regeneration of epithelial cells. She has obtained a tissue sample from a normal thyroid gland for histopathologic examination. It shows follicles lined by a single layer of cube-like cells with large central nuclei. Which of the following parts of the female reproductive tract is also lined by this type of epithelium?\nA. Ovaries\nB. Vagina\nC. Fallopian tubes\nD. Vulva\nChoose the correct option.",
["Base_Generator_Tool", "Google_Search_Tool", "Wikipedia_Search_Tool", "Python_Coder_Tool"],
"A. Ovaries"],
],
inputs=[gr.Textbox(label="Category", visible=False), user_query, enabled_tools, gr.Textbox(label="Reference Answer", visible=False)],
# label="Try these examples with suggested tools."
)
# Link button click to function
run_button.click(
fn=solve_problem_gradio,
inputs=[user_query, max_steps, max_time, llm_model_engine, enabled_tools],
outputs=chatbot_output,
concurrency_limit=10, # A10 GPU can handle ~10 concurrent requests with vLLM
concurrency_id="agentflow_solver" # Shared queue for managing GPU resource
)
#################### Gradio Interface ####################
# Configure queue for high traffic - optimized for A10 GPU (40G RAM, 24G VRAM)
demo.queue(
default_concurrency_limit=10, # Balanced for A10 GPU + vLLM inference
        max_size=50,  # Allow up to 50 queued requests for traffic spikes
)
# Launch the Gradio app with optimized threading
# demo.launch(ssr_mode=False)
demo.launch(
ssr_mode=False,
share=True,
max_threads=80 # Increase from default 40 to support high concurrency
)
if __name__ == "__main__":
import atexit
args = parse_arguments()
# All tools for AgentFlow
all_tools = [
"Base_Generator_Tool",
"Python_Coder_Tool",
"Google_Search_Tool",
"Wikipedia_Search_Tool",
"Web_Search_Tool"
]
args.enabled_tools = ",".join(all_tools)
# NOTE: Use the same name for the query cache directory as the dataset directory
args.root_cache_dir = DATASET_DIR.name
# Start vLLM service
print("=" * 60)
print("π Checking vLLM service status...")
if not check_vllm_service():
print(f"β οΈ vLLM service not running. Starting {VLLM_MODEL_NAME}...")
start_vllm_service()
else:
print(f"β
vLLM service is already running on port {VLLM_PORT}")
print("=" * 60)
# Register cleanup function
# atexit.register(stop_vllm_service)
main(args)