Upload 9 files

- .gitattributes +35 -35
- .gitignore +4 -0
- README.md +15 -13
- app.py +215 -0
- get_real_rank.py +27 -0
- langgraph_dir/agent.py +156 -0
- langgraph_dir/custom_tools.py +559 -0
- langgraph_dir/prompt.py +11 -0
- requirements.txt +23 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,4 @@
+__pycache__
+.DS_Store
+tmp*
+.env
README.md
CHANGED
@@ -1,13 +1,15 @@
----
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk: gradio
-sdk_version: 5.
-app_file: app.py
-pinned: false
-
-
-
-
+---
+title: Agents Course Final Assignment
+emoji: 🕵🏻‍♂️
+colorFrom: indigo
+colorTo: indigo
+sdk: gradio
+sdk_version: 5.25.2
+app_file: app.py
+pinned: false
+hf_oauth: true
+# optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
+hf_oauth_expiration_minutes: 480
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,215 @@
+import os
+import gradio as gr
+import requests
+import pandas as pd
+from time import sleep
+from tqdm import tqdm
+
+# (Keep Constants as is)
+# --- Constants ---
+DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+
+# --- Set framework to "langgraph" ---
+FRAMEWORK = 'langgraph'
+
+async def run_and_submit_all(profile: gr.OAuthProfile | None):
+    """
+    Fetches all questions, runs the agent on them, submits all answers,
+    and displays the results.
+    """
+    # --- Determine HF Space Runtime URL and Repo URL ---
+    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending a link to the code
+
+    if profile:
+        username = f"{profile.username}"
+        print(f"User logged in: {username}")
+    else:
+        print("User not logged in.")
+        return "Please log in to Hugging Face with the button.", None
+
+    api_url = DEFAULT_API_URL
+    questions_url = f"{api_url}/questions"
+    submit_url = f"{api_url}/submit"
+
+    # 1. Instantiate Agent (modify this part to create your agent)
+    try:
+        if FRAMEWORK == 'langgraph':
+            from langgraph_dir.agent import LangGraphAgent
+            agent = LangGraphAgent(model_name="gpt-4.1-mini")
+        else:
+            raise AttributeError(
+                f"Set FRAMEWORK = 'langgraph', received: '{FRAMEWORK}'")
+    except Exception as e:
+        print(f"Error instantiating agent: {e}")
+        return f"Error initializing agent: {e}", None
+    # For an app running as a Hugging Face Space, this link points to your codebase (useful for others, so please keep it public)
+    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
+    print(agent_code)
+
+    # 2. Fetch Questions
+    print(f"Fetching questions from: {questions_url}")
+    try:
+        response = requests.get(questions_url, timeout=15)
+        response.raise_for_status()
+        questions_data = response.json()
+        if not questions_data:
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
+        print(f"Fetched {len(questions_data)} questions.")
+    except requests.exceptions.RequestException as e:
+        print(f"Error fetching questions: {e}")
+        return f"Error fetching questions: {e}", None
+    except requests.exceptions.JSONDecodeError as e:
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
+    except Exception as e:
+        print(f"An unexpected error occurred fetching questions: {e}")
+        return f"An unexpected error occurred fetching questions: {e}", None
+
+    # 3. Run your Agent
+    results_log = []
+    answers_payload = []
+    print(f"Running agent on {len(questions_data)} questions...")
+    for item in questions_data:
+        task_id = item.get("task_id")
+        question_text = item.get("question")
+        file_name = item.get("file_name")
+        if not task_id or question_text is None:
+            print(f"Skipping item with missing task_id or question: {item}")
+            continue
+        try:
+            if file_name:
+                # Add the URL of the data source to the question (so that the agent can deal with it)
+                file_url = f"{DEFAULT_API_URL}/files/{task_id}"
+                question_text += f'\nFile URL: "{file_url}"'
+                # Get the extension of the file to help the agent
+                try:
+                    ext = file_name.split('.')[-1]
+                    question_text += f" (.{ext} file)"
+                except Exception:
+                    pass
+
+            # Call the agent
+            submitted_answer = agent(question_text)
+
+            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+
+            # Wait 1 minute before the next call to avoid hitting the tokens-per-minute (TPM) limit
+            print('\n\n-> Sleeping for 1 minute to avoid hitting the tokens-per-minute (TPM) limit')
+            for _ in tqdm(range(60)):  # tqdm shows the remaining wait time
+                sleep(1)
+        except Exception as e:
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+
+    if not answers_payload:
+        print("Agent did not produce any answers to submit.")
+        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+    # 4. Prepare Submission
+    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+    print(status_update)
+
+    # 5. Submit
+    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+    try:
+        response = requests.post(submit_url, json=submission_data, timeout=60)
+        response.raise_for_status()
+        result_data = response.json()
+        final_status = (
+            f"Submission Successful!\n"
+            f"User: {result_data.get('username')}\n"
+            f"Overall Score: {result_data.get('score', 'N/A')}% "
+            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+            f"Message: {result_data.get('message', 'No message received.')}"
+        )
+        print("Submission successful.")
+        results_df = pd.DataFrame(results_log)
+        return final_status, results_df
+    except requests.exceptions.HTTPError as e:
+        error_detail = f"Server responded with status {e.response.status_code}."
+        try:
+            error_json = e.response.json()
+            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+        except requests.exceptions.JSONDecodeError:
+            error_detail += f" Response: {e.response.text[:500]}"
+        status_message = f"Submission Failed: {error_detail}"
+        print(status_message)
+        results_df = pd.DataFrame(results_log)
+        return status_message, results_df
+    except requests.exceptions.Timeout:
+        status_message = "Submission Failed: The request timed out."
+        print(status_message)
+        results_df = pd.DataFrame(results_log)
+        return status_message, results_df
+    except requests.exceptions.RequestException as e:
+        status_message = f"Submission Failed: Network error - {e}"
+        print(status_message)
+        results_df = pd.DataFrame(results_log)
+        return status_message, results_df
+    except Exception as e:
+        status_message = f"An unexpected error occurred during submission: {e}"
+        print(status_message)
+        results_df = pd.DataFrame(results_log)
+        return status_message, results_df
+
+
+# --- Build Gradio Interface using Blocks ---
+with gr.Blocks() as demo:
+    gr.Markdown("# Basic Agent Evaluation Runner")
+    gr.Markdown(
+        """
+        **Instructions:**
+
+        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
+        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
+
+        ---
+        **Disclaimers:**
+        Once you click the "submit" button, it can take quite some time (this is the time the agent needs to go through all the questions).
+        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to address the slow submit step, a solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
+        """
+    )
+
+    gr.LoginButton()
+
+    run_button = gr.Button("Run Evaluation & Submit All Answers")
+
+    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+    # Removed max_rows=10 from DataFrame constructor
+    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
+
+    run_button.click(
+        fn=run_and_submit_all,
+        outputs=[status_output, results_table]
+    )
+
+if __name__ == "__main__":
+    print("\n" + "-"*30 + " App Starting " + "-"*30)
+
+    # Check for SPACE_HOST and SPACE_ID at startup for information
+    space_host_startup = os.getenv("SPACE_HOST")
+    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
+
+    if space_host_startup:
+        print(f"✅ SPACE_HOST found: {space_host_startup}")
+        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
+    else:
+        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
+
+    if space_id_startup:  # Print repo URLs if SPACE_ID is found
+        print(f"✅ SPACE_ID found: {space_id_startup}")
+        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+    else:
+        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
+
+    print("-"*(60 + len(" App Starting ")) + "\n")
+
+    print("Launching Gradio Interface for Basic Agent Evaluation...")
+    demo.launch(debug=True, share=False)
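
The disclaimer above suggests caching answers or answering asynchronously instead of blocking the submit button. A minimal sketch of the async variant (not part of this commit; it assumes the blocking `agent` callable and the `questions_data` items from app.py, and it gives up the 60-second pause that protects the TPM limit, so throttling would be needed in practice):

import asyncio

async def answer_all(agent, questions):
    # Run the blocking agent calls in worker threads so questions overlap.
    # Add throttling (e.g. an asyncio.Semaphore) for rate-limited models.
    async def answer(item):
        text = await asyncio.to_thread(agent, item["question"])
        return {"task_id": item["task_id"], "submitted_answer": text}
    return list(await asyncio.gather(*(answer(q) for q in questions)))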
get_real_rank.py
ADDED
@@ -0,0 +1,27 @@
+# Code to get your "true" rank on the agent course challenge:
+# - remove all the users that used the code space of someone else (won't work for the forks)
+# - set the same rank for all the people with the same score
+
+import pandas as pd
+from datasets import load_dataset
+
+username = 'ascythe'
+
+dataset = load_dataset('agents-course/unit4-students-scores')
+df = pd.DataFrame(dataset['train'])
+df.sort_values('score', ascending=False, inplace=True)
+
+# Keep only users that submitted from their own code space or that ran locally (with None in code space)
+df['to_keep'] = df.apply(lambda row: row['username'] in row['code'] or 'None' in row['code'], axis=1)
+df = df[df['to_keep'] == True]
+
+# Compute rank (all users with the same score share the same rank)
+ranks_to_add = 0
+for i, score in enumerate(df['score'].unique()):
+    df.loc[df['score'] == score, 'rank'] = ranks_to_add + i + 1
+    ranks_to_add += len(df[df['score'] == score]) - 1
+
+# Find `username` rank
+rank = int(df[df['username'] == username]['rank'].values[0])
+total = len(df['code'].unique())
+print(f"{username} rank: {rank}/{total} (top {rank/total*100:.1f}%)")
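
The ranking loop above implements competition ("min") ranking by hand: tied scores share a rank, and a gap follows each tie group. As a sanity check, pandas can produce the same ranks directly (equivalent sketch, not part of the commit):

# Same result as the ranks_to_add loop: ties share the lowest rank of the group
df['rank'] = df['score'].rank(method='min', ascending=False).astype(int)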
langgraph_dir/agent.py
ADDED
@@ -0,0 +1,156 @@
+# env variables needed: OPENAI_API_KEY, BRAVE_SEARCH_API_KEY
+
+import os
+import json
+from dotenv import load_dotenv
+
+from typing import Literal
+from langchain_openai import ChatOpenAI
+from langgraph.graph import MessagesState
+from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
+from langgraph.graph import StateGraph, START, END
+from langchain_community.tools import BraveSearch, WikipediaQueryRun
+from langchain_community.utilities import WikipediaAPIWrapper
+
+from .prompt import system_prompt
+from .custom_tools import (calculator_tool, web_search, query_image, python_repl,
+                           get_webdoc_content, get_website_content, extract_answer_from_content,
+                           transcribe_audio, get_youtube_transcript, generate_table_from_data, check_commutative)
+
+load_dotenv()
+# Get an API key from OpenAI, then read it from the environment
+openai_api_key = os.environ['OPENAI_API_KEY']
+
+class LangGraphAgent:
+    def __init__(self,
+                 model_name="gpt-4.1-mini",
+                 show_tools_desc=True,
+                 show_prompt=True):
+
+        # =========== LLM definition ===========
+        llm = ChatOpenAI(model=model_name, temperature=0, openai_api_key=openai_api_key)
+        print(f"LangGraphAgent initialized with model \"{model_name}\"")
+
+        # =========== Augment the LLM with tools ===========
+        community_tools = [
+            BraveSearch.from_api_key(  # Web search (more performant than DuckDuckGo)
+                api_key=os.getenv("BRAVE_SEARCH_API_KEY"),  # needs BRAVE_SEARCH_API_KEY in env
+                search_kwargs={"count": 5}),
+        ]
+
+        wikipedia_tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
+
+        custom_tools = [
+            calculator_tool,              # Basic math operations
+            web_search,                   # Web search using Tavily
+            query_image,                  # Ask anything about an image using a VLM
+            python_repl,                  # Python code interpreter
+            get_webdoc_content,           # Load a web document
+            get_website_content,          # Load a web page
+            extract_answer_from_content,  # Extract an answer from given content (e.g. PDF, web page)
+            transcribe_audio,             # Transcribe an audio file to text
+            get_youtube_transcript,       # Get the transcript of a YouTube video
+            generate_table_from_data,     # Generate a table from given data
+            check_commutative,            # Analyze a binary operation table for commutativity
+        ]
+
+        tools = [wikipedia_tool] + community_tools + custom_tools
+        tools_by_name = {tool.name: tool for tool in tools}
+        llm_with_tools = llm.bind_tools(tools)
+
+        # =========== Agent definition ===========
+
+        # Nodes
+        def llm_call(state: MessagesState):
+            """LLM decides whether to call a tool or not"""
+
+            return {
+                "messages": [
+                    llm_with_tools.invoke(
+                        [
+                            SystemMessage(
+                                content=system_prompt
+                            )
+                        ]
+                        + state["messages"]
+                    )
+                ]
+            }
+
+        def tool_node(state: dict):
+            """Performs the tool call"""
+
+            result = []
+            for tool_call in state["messages"][-1].tool_calls:
+                tool = tools_by_name[tool_call["name"]]
+                observation = tool.invoke(tool_call["args"])
+                result.append(ToolMessage(content=observation, tool_call_id=tool_call["id"]))
+            return {"messages": result}
+
+        # Conditional edge function to route to the tool node or end, based on whether the LLM made a tool call
+        def should_continue(state: MessagesState) -> Literal["environment", END]:
+            """Decide whether to continue the loop or stop, based on whether the LLM made a tool call"""
+
+            messages = state["messages"]
+            last_message = messages[-1]
+            # If the LLM made a tool call, perform the action
+            if last_message.tool_calls:
+                return "Action"
+            # Otherwise, stop (reply to the user)
+            return END
+
+        # Build workflow
+        agent_builder = StateGraph(MessagesState)
+
+        # Add nodes
+        agent_builder.add_node("llm_call", llm_call)
+        agent_builder.add_node("environment", tool_node)
+
+        # Add edges to connect nodes
+        agent_builder.add_edge(START, "llm_call")
+        agent_builder.add_conditional_edges(
+            "llm_call",
+            should_continue,
+            {
+                # Name returned by should_continue : name of the next node to visit
+                "Action": "environment",
+                END: END,
+            },
+        )
+        agent_builder.add_edge("environment", "llm_call")
+
+        # Compile the agent
+        self.agent = agent_builder.compile()
+
+        if show_tools_desc:
+            for i, tool in enumerate(llm_with_tools.kwargs['tools']):
+                print("\n" + "="*30 + f" Tool {i+1} " + "="*30)
+                print(json.dumps(tool[tool['type']], indent=4))
+
+        if show_prompt:
+            print("\n" + "="*30 + " System prompt " + "="*30)
+            print(system_prompt)
+
+
+    def __call__(self, question: str) -> str:
+        print("\n\n" + "*"*20)
+        print(f"Agent received question: {question}")
+        print("*"*20)
+
+        # Invoke
+        messages = [HumanMessage(content=question)]
+        messages = self.agent.invoke({"messages": messages},
+                                     {"recursion_limit": 30})  # maximum number of steps before hitting a stop condition
+        for m in messages["messages"]:
+            m.pretty_print()
+
+        # Post-process the response (keep only what's after "FINAL ANSWER:" for the exact match)
+        response = str(messages["messages"][-1].content)
+        try:
+            response = response.split("FINAL ANSWER:")[-1].strip()
+        except Exception:
+            print('Could not split response on "FINAL ANSWER:"')
+        print("\n\n" + "-"*50)
+        print(f"Agent returning with answer: {response}")
+        return response
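
A minimal usage sketch of the class above (assuming OPENAI_API_KEY and BRAVE_SEARCH_API_KEY are set in the environment and the script runs from the repo root; the question is made up):

from langgraph_dir.agent import LangGraphAgent

agent = LangGraphAgent(model_name="gpt-4.1-mini",
                       show_tools_desc=False, show_prompt=False)
answer = agent("What is 17 * 23?")  # should route through calculator_tool
print(answer)                       # text after "FINAL ANSWER:", e.g. "391"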
langgraph_dir/custom_tools.py
ADDED
@@ -0,0 +1,559 @@
+from langchain_core.tools import tool, Tool
+import math
+
+@tool
+def calculator_tool(expression: str) -> str:
+    """
+    Evaluate a mathematical expression.
+    """
+    # Define the restricted global and local namespace
+    safe_globals = {"__builtins__": {}}
+
+    safe_locals = {
+        # Math functions
+        'sqrt': math.sqrt,
+        'sin': math.sin,
+        'cos': math.cos,
+        'tan': math.tan,
+        'log': math.log10,  # log base 10
+        'ln': math.log,     # natural log
+        'exp': math.exp,
+        'pow': pow,
+
+        # Constants
+        'pi': math.pi,
+        'e': math.e,
+
+        # Built-in math utilities
+        'abs': abs,
+        'round': round,
+        'max': max,
+        'min': min,
+        'sum': sum,
+    }
+
+    try:
+        # Evaluate the expression in a restricted environment
+        result = eval(expression, safe_globals, safe_locals)
+
+        # Handle None explicitly
+        if result is None:
+            return "calculator tool produced no valid result"
+
+        # Optional: round very small floats to avoid scientific notation
+        if isinstance(result, float) and abs(result) < 1e-9:
+            result = round(result, 10)
+
+        return str(result)
+
+    except SyntaxError as se:
+        return f"Syntax error in expression: {str(se)}"
+    except NameError as ne:
+        return f"Undefined variable or function used: {str(ne)}"
+    except ZeroDivisionError:
+        return "Error: Division by zero"
+    except Exception as e:
+        return f"Evaluation error: {str(e)}"
+
+from langchain_tavily import TavilySearch
+
+@tool
+def web_search(query: str) -> str:
+    """
+    Searches the web and returns a list of the most relevant URLs.
+    Use this FIRST for complex queries, metadata questions, or to find the right sources.
+    Then follow up with get_webdoc_content or get_website_content on the most promising URL.
+    """
+    try:
+        tavily_search = TavilySearch(
+            max_results=5,
+            topic="general",
+            search_depth="advanced",
+            include_raw_content=False,  # Just URLs and snippets
+        )
+
+        results = tavily_search.invoke(query)
+        # Format results to show URLs and brief descriptions
+        web_search_results = "Search Results:\n"
+        for i, result in enumerate(results["results"], 1):
+            web_search_results += f"{i}. {result['title']}: {result['url']}\n   {result['content'][:150]}...\n\n"
+
+        return web_search_results
+    except Exception as e:
+        return f"web_search tool error: {str(e)}"
+
+import os
+import tempfile
+import requests
+import easyocr
+from io import BytesIO
+from PIL import Image
+from openai import OpenAI
+
+@tool
+def query_image(query: str, source: str, need_ocr: bool = True, need_reasoning: bool = False) -> str:
+    """Use ONLY to answer a question about an image using a Vision Language Model.
+    NOT for image processing or any task other than asking a question about an image.
+    Args:
+        query (str): The question about the image, e.g. how many persons are on the image?
+        source (str): URL to the image
+        need_reasoning (bool): Set to True for complex queries that require a reasoning model to answer properly. Set to False otherwise.
+        need_ocr (bool): If True, also extract visible text from the image. Set to False otherwise.
+    """
+
+    try:
+        # OCR extraction (optional)
+        ocr_text = ""
+        if need_ocr:
+            file_to_use = None  # so cleanup is safe even if the download fails
+            try:
+                # Download the image from the URL
+                response = requests.get(source, stream=True, timeout=10)
+                response.raise_for_status()
+
+                # Load the image into PIL
+                image = Image.open(BytesIO(response.content))
+
+                # Save to a temporary file
+                with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmpfile:
+                    image.save(tmpfile, format=image.format)
+                    file_to_use = tmpfile.name
+
+                # Perform OCR
+                reader = easyocr.Reader(['en'])
+                results = reader.readtext(file_to_use)
+                ocr_text = "\n".join([res[1] for res in results])
+                ocr_text = f"\n\n[OCR Extracted Text]:\n{ocr_text}"
+
+            except Exception as ocr_error:
+                ocr_text = f"\n\n[OCR Error]: {str(ocr_error)}"
+            finally:
+                # Clean up the temporary file
+                if file_to_use and os.path.exists(file_to_use):
+                    os.unlink(file_to_use)
+
+        # Query the Vision Language Model
+        client = OpenAI()
+        if need_reasoning:
+            model_name = "o4-mini"
+        else:
+            model_name = "gpt-4o-mini"
+        response = client.chat.completions.create(
+            model=model_name,
+            messages=[
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": query},
+                        {"type": "image_url", "image_url": {"url": source}},
+                    ],
+                }
+            ],
+            max_tokens=512,
+        )
+        content = response.choices[0].message.content
+
+        # Combine OCR and VLM output
+        final_response = content
+        if need_ocr and ocr_text:
+            final_response += ocr_text
+
+        return final_response
+
+    except Exception as e:
+        return f"Image query failed: {str(e)}"
+
+from pydantic import BaseModel, Field
+from e2b import Sandbox
+import re
+import os
+
+class PythonCodeInput(BaseModel):
+    code: str = Field(description="The Python code string to execute.")
+
+@tool(args_schema=PythonCodeInput)
+def python_repl(code: str) -> str:
+    """
+    Use this to execute single or multi-line Python commands to perform tasks like:
+    sort a list in ascending or descending order, reverse an input string, draw a table, photo processing, etc.
+
+    Input should be syntactically valid Python code.
+    Make sure to include required imports in the code.
+    Always include `print(...)` or `image.save(...)` in your code to return outputs that can be seen.
+
+    You are allowed to access the internet and download files from URLs via code (e.g., using requests).
+    Avoid using any system-level commands or libraries that could harm the host system.
+    Avoid commands that require user input or block indefinitely (e.g., `input()`).
+    """
+
+    # List of forbidden patterns in code
+    FORBIDDEN_PATTERNS = [
+        r'\bimport\s+(os|sys|subprocess|shutil|socket)',
+        r'\b(eval|exec|input|open)\s*\(',
+        r'\b__import__',
+        r'\bos\.',
+        r'\bsys\.',
+        r'\bsubprocess\.',
+    ]
+
+    # Step 1: Keyword-based security check
+    for pattern in FORBIDDEN_PATTERNS:
+        match = re.search(pattern, code)
+        if match:
+            return f"Blocked unsafe operation: {match.group()}"
+
+    # Step 2: Create an E2B sandbox and run the code inside it
+    try:
+        with Sandbox(api_key=os.getenv("E2B_API_KEY")) as sandbox:
+            # Known mismatches: import name -> pip package name
+            import_to_pip = {
+                "PIL": "pillow",
+                "cv2": "opencv-python",
+                "yaml": "PyYAML",
+                "bs4": "beautifulsoup4",
+                "tkinter": "tk",
+            }
+
+            # Built-in modules that don't need installation
+            built_in_modules = {
+                "math", "re", "json", "csv", "os", "sys", "time", "datetime", "random",
+                "itertools", "functools", "__future__", "collections", "pathlib", "io",
+            }
+
+            # Extract import statements
+            import_matches = re.findall(
+                r'(?:import\s+([a-zA-Z0-9_]+)(?!\.)|\bfrom\s+([a-zA-Z0-9_]+)(?=\s+import\b))',
+                code
+            )
+            base_imports = set(match[0] or match[1] for match in import_matches)  # match[0] = 'import X', match[1] = 'from X import Y'
+
+            # Determine which packages to install
+            packages_to_install = set()
+            for imp in base_imports:
+                # Skip known built-ins
+                if imp in built_in_modules:
+                    continue
+
+                # Avoid installing system-specific modules like __pycache__
+                if imp.startswith("__"):
+                    continue
+
+                # Use the mapped name if one exists, else the import name
+                package_name = import_to_pip.get(imp, imp)
+                packages_to_install.add(package_name)
+
+            # Install the necessary packages
+            if packages_to_install:
+                install_cmd = f"pip install {' '.join(packages_to_install)}"
+                result = sandbox.commands.run(install_cmd)
+
+                if result.stderr:
+                    return f"Failed to install packages:\n{result.stderr}"
+
+            # Write the user code into the sandbox
+            CODE_FILE_PATH = "/tmp/code.py"
+            sandbox.files.write(CODE_FILE_PATH, code)
+
+            # Execute the code
+            result = sandbox.commands.run(f"python {CODE_FILE_PATH}")
+            stdout = result.stdout.strip()
+            stderr = result.stderr.strip()
+
+            # Return the output
+            if stderr:
+                return f"Execution error:\n{stderr}"
+
+            return stdout or "No output"
+
+    except Exception as e:
+        return f"Sandbox error: {str(e)}"
+
+import requests
+from bs4 import BeautifulSoup
+from PyPDF2 import PdfReader
+from io import BytesIO
+from markdownify import markdownify
+
+@tool
+def get_webdoc_content(url: str) -> str:
+    """
+    Extracts content from PDFs or document-like URLs (academic papers, reports).
+    Can be used after web_search to get detailed information.
+    Args:
+        url (str): the URL of the web page to extract the content from
+    """
+    try:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
+        }
+
+        response = requests.get(url, headers=headers, timeout=10)
+        response.raise_for_status()
+
+        content_type = response.headers.get('Content-Type', '')
+
+        # PDF handling
+        if 'application/pdf' in content_type:
+            pdf_file = BytesIO(response.content)
+            reader = PdfReader(pdf_file)
+            text = "\n".join(page.extract_text() for page in reader.pages)
+            return f"## PDF Content from {url}\n\n```\n{text[:15000]}\n```"
+
+        # HTML document handling
+        elif 'text/html' in content_type:
+            soup = BeautifulSoup(response.text, 'html.parser')
+            cleaned_html = soup.body or soup  # Fall back to the full document
+            return markdownify(str(cleaned_html), strip=['a'])
+
+        # Fallback: raw text extraction
+        else:
+            return f"## Raw Content from {url}\n\n{response.text[:15000]}"
+
+    except requests.exceptions.RequestException as e:
+        return f"HTTP error in get_webdoc_content: {str(e)}"
+    except Exception as e:
+        return f"Unexpected error in get_webdoc_content: {str(e)}"
+
+import requests
+from bs4 import BeautifulSoup
+from markdownify import markdownify
+
+@tool
+def get_website_content(url: str) -> str:
+    """
+    Extracts content from HTML-based URLs.
+    Specializes in Wikipedia, technical documentation, and discussion pages.
+    NOT for document-based URLs (academic papers, reports).
+    Used after web_search to get detailed information.
+    Args:
+        url (str): The URL of the web page to extract content from
+    """
+    try:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
+        }
+
+        response = requests.get(url, headers=headers, timeout=10)
+        response.raise_for_status()
+
+        soup = BeautifulSoup(response.text, 'html.parser')
+        # Remove non-content elements
+        for element in soup.select('script, style, footer, nav, header, aside'):
+            element.decompose()
+
+        # Convert the cleaned HTML to markdown
+        cleaned_html = str(soup.body) if soup.body else str(soup)
+        markdown_content = markdownify(cleaned_html, strip=['a'])  # Optional: strip links
+
+        return f"## Extracted Content from {url}\n\n{markdown_content[:15000]}"  # Limit length
+
+    except requests.exceptions.RequestException as e:
+        return f"HTTP error in get_website_content: {str(e)}"
+    except Exception as e:
+        return f"Unexpected error in get_website_content: {str(e)}"
+
+import os
+from langchain_openai import OpenAIEmbeddings
+from langchain_community.vectorstores import FAISS
+from langchain_text_splitters import RecursiveCharacterTextSplitter
+
+@tool
+def extract_answer_from_content(content: str | dict, query: str) -> str:
+    """
+    Extract relevant information from content based on the user query.
+
+    Args:
+        content (str/dict): Raw text, text transcribed from audio, or structured content from any source
+        query (str): Natural language question to answer
+
+    Returns:
+        str: Concise answer extracted from the content
+    """
+    try:
+        # Normalize the content format
+        if isinstance(content, dict):
+            text_content = ""
+            if "summary" in content:
+                text_content += f"SUMMARY: {content['summary']}\n\n"
+            if "infobox" in content:
+                text_content += "INFOBOX:\n"
+                for k, v in content["infobox"].items():
+                    text_content += f"{k}: {v}\n"
+                text_content += "\n"
+            if "sections" in content:
+                for section, text in content["sections"].items():
+                    text_content += f"{section}:\n{text}\n\n"
+        else:
+            text_content = content
+
+        # Initialize OpenAI embeddings
+        embeddings = OpenAIEmbeddings(
+            openai_api_key=os.getenv("OPENAI_API_KEY"),
+            model="text-embedding-3-large"
+        )
+
+        # Split the content into manageable chunks
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=500,
+            chunk_overlap=100
+        )
+        chunks = text_splitter.split_text(text_content)
+
+        # Create a vector store
+        vectorstore = FAISS.from_texts(chunks, embeddings)
+        retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
+
+        # Get the most relevant content
+        relevant_docs = retriever.invoke(query)
+        combined_text = " ".join([doc.page_content for doc in relevant_docs])
+
+        # Return the relevant content with context
+        return f"Relevant information found:\n{combined_text[:1500]}"
+
+    except Exception as e:
+        return f"Content extraction failed: {str(e)}"
+
+import os
+import requests
+from openai import OpenAI
+
+@tool
+def transcribe_audio(source: str, file_extension: str) -> str:
+    """
+    Transcribes an audio file to text from a URL.
+    Args:
+        source (str): URL to an audio file.
+        file_extension (str): Extension of the audio file, e.g. "mp3".
+
+    Returns:
+        str: The transcribed text, or an error message.
+    """
+    try:
+        # Download the audio file
+        response = requests.get(source)
+        response.raise_for_status()
+        # Write it to disk
+        file_extension = file_extension.replace('.', '')
+        with open(f'tmp.{file_extension}', 'wb') as file:
+            file.write(response.content)
+
+        client = OpenAI()
+        with open(f'tmp.{file_extension}', 'rb') as audio_file:
+            transcription = client.audio.transcriptions.create(
+                model="whisper-1",
+                file=audio_file
+            )
+        return transcription.text
+
+    except Exception as e:
+        return f"Transcription error: {str(e)}"
+
+from youtube_transcript_api import YouTubeTranscriptApi
+from pytube import extract
+
+@tool
+def get_youtube_transcript(page_url: str) -> str:
+    """Get the transcript of the audio component of a YouTube video.
+    Use this for YouTube videos with available transcripts.
+    Args:
+        page_url (str): YouTube URL of the video
+    """
+    try:
+        # Get the video ID from the URL
+        video_id = extract.video_id(page_url)
+
+        # Fetch the transcript
+        transcript = YouTubeTranscriptApi.get_transcript(video_id)
+
+        # Return the concatenated text
+        return '\n'.join([s['text'] for s in transcript])
+
+    except Exception as e:
+        return f"get_youtube_transcript failed: {str(e)}"
+
+from tabulate import tabulate
+from typing import Dict, Any, List
+
+@tool
+def generate_table_from_data(data: List[Dict[str, Any]]) -> str:
+    """
+    Convert a list of dictionaries to a markdown table.
+
+    Args:
+        data (List[Dict]): List of objects with common keys
+
+    Returns:
+        str: Markdown-formatted table
+    """
+    if not data:
+        return "No data available"
+
+    headers = data[0].keys()
+    rows = [list(item.values()) for item in data]
+
+    return tabulate(rows, headers=headers, tablefmt="pipe")
+
+from pydantic import BaseModel, Field
+from typing import List, Dict
+
+class CommutativeCheckInput(BaseModel):
+    table_str: str = Field(..., description="Markdown-formatted string of the operation table (e.g., |*|a|b|c|...)")
+    elements: List[str] = Field(..., description="List of elements in the set S")
+
+@tool(args_schema=CommutativeCheckInput)
+def check_commutative(table_str: str, elements: List[str]) -> str:
+    """
+    Analyzes a binary operation table for commutativity.
+
+    Args:
+        table_str (str): Markdown-formatted string of the operation table.
+        elements (List[str]): List of elements in the set S.
+
+    Returns:
+        str: Comma-separated list of element pairs (e.g., "b,e") where x*y ≠ y*x.
+    """
+
+    # Parse the table string into a 2D list
+    lines = [line.strip() for line in table_str.strip().split('\n') if line.strip()]
+    header = [cell.strip() for cell in lines[0].split('|') if cell.strip()][1:]  # Skip the first cell (the operator)
+    rows = []
+    for line in lines[2:]:
+        cells = [cell.strip() for cell in line.split('|') if cell.strip()]  # Remove empty cells
+        if cells:
+            rows.append(cells)
+
+    # Validate that all rows have the correct number of cells
+    expected_length = len(header) + 1  # x plus one cell for each header element
+    for row in rows:
+        if len(row) < expected_length:
+            return f"Error: Row '{row[0]}' has {len(row)} cells, but expected {expected_length}."
+
+    # Build a dictionary for the operation: operation[x][y] = result
+    operation: Dict[str, Dict[str, str]] = {}
+    for row in rows:
+        x = row[0]
+        operation[x] = {}
+        for i, y in enumerate(header):
+            operation[x][y] = row[i + 1]
+
+    # Check all pairs (x, y) for x*y == y*x
+    counterexamples = []
+    for x in elements:
+        for y in elements:
+            if x < y:  # Avoid redundant checks and self-comparison
+                try:
+                    xy = operation[x][y]
+                    yx = operation[y][x]
+                    if xy != yx:
+                        counterexamples.append(f"{x},{y}")
+                except KeyError:
+                    return f"Error: Missing data for pair ({x}, {y}) in table."
+
+    return "\n".join(counterexamples) if counterexamples else "The operation is commutative."
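
A quick illustration of how check_commutative parses its input (a hypothetical 3-element table, not from the commit):

# b*c = a but c*b = b, so this prints "b,c"
table = (
    "|*|a|b|c|\n"
    "|---|---|---|---|\n"
    "|a|a|b|c|\n"
    "|b|b|c|a|\n"
    "|c|c|b|a|\n"
)
print(check_commutative.invoke({"table_str": table, "elements": ["a", "b", "c"]}))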
langgraph_dir/prompt.py
ADDED
@@ -0,0 +1,11 @@
+# Customized GAIA system prompt
+system_prompt = """\
+You are a general AI assistant with tools.
+I will ask you a question. Use your tools, and answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. \
+YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
+If your answer is an instruction, you need to perform that instruction too, until you give an answer that is not an instruction.
+If you are asked for a number, don't write it with commas and don't use units such as $ or percent signs unless specified otherwise.
+If you are asked for a string, don't use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise.
+If you are asked for a comma-separated list, apply the above rules to each element depending on whether it is a number or a string.
+If you decide to write code, use the python_repl tool to write and run it.
+"""
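
The FINAL ANSWER template matters because agent.py recovers the answer by splitting on that marker; a tiny check of the contract (illustrative only, with a made-up reply):

reply = "The capital of France is Paris.\nFINAL ANSWER: Paris"
assert reply.split("FINAL ANSWER:")[-1].strip() == "Paris"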
requirements.txt
ADDED
@@ -0,0 +1,23 @@
+gradio
+requests
+pandas
+tqdm
+datasets
+langchain
+langgraph
+langchain-openai
+langchain-community
+langchain-tavily
+langchain_experimental
+easyocr
+Pillow
+openai
+pydantic
+beautifulsoup4
+PyPDF2
+markdownify
+youtube-transcript-api
+pytube
+tabulate
+e2b
+duckduckgo-search