import os
import gradio as gr
import logging
from typing import Dict, List
from huggingface_hub import InferenceClient
from llama_index.core.tools import FunctionTool
from duckduckgo_search import DDGS
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core.agent import ReActAgent
import numpy as np
import ast
import operator as op

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

HF_TOKEN = os.environ.get("HF_TOKEN")
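
# Local LLM: a small Qwen2.5 0.5B instruct model loaded through llama_index's
# HuggingFaceLLM wrapper (weights are fetched via transformers on first run).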
llm = HuggingFaceLLM(
    context_window=4096,
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.7, "top_p": 0.95},
    tokenizer_name="Gensyn/Qwen2.5-0.5B-Instruct",
    model_name="Gensyn/Qwen2.5-0.5B-Instruct",
)
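
# Question-validation helper: guess_question asks the LLM to reconstruct the
# question behind a given answer, and validate_question_only scores the cosine
# similarity between two questions using all-MiniLM-L6-v2 sentence embeddings.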
class QuestionValidation:
    def __init__(self, llm_client):
        self.client = llm_client
        self.embedding_model = SentenceTransformer("all-MiniLM-L6-v2")

    def guess_question(self, answer: str) -> str:
        prompt = f"This was the answer: {answer}\nWhat question would likely have led to it?"
        return self.client.complete(prompt).text.strip()

    def compute_similarity(self, q1: str, q2: str) -> float:
        embeddings = self.embedding_model.encode([q1, q2])
        return cosine_similarity([embeddings[0]], [embeddings[1]])[0][0]

    def validate_question_only(self, original_question: str, guessed_question: str) -> Dict[str, object]:
        similarity = self.compute_similarity(original_question, guessed_question)
        return {
            "original_question": original_question,
            "guessed_question": guessed_question,
            "similarity": round(float(similarity), 4),
        }
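
# Web search tool backend: DuckDuckGo text search. Errors are returned as data
# rather than raised so the agent can read them instead of crashing.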
def search_web(query: str, max_results: int = 5) -> List[Dict[str, str]]:
    try:
        with DDGS() as ddgs:
            return [r for r in ddgs.text(query, max_results=max_results)]
    except Exception as e:
        return [{"error": str(e)}]
OPERATORS = {
    ast.Add: op.add,
    ast.Sub: op.sub,
    ast.Mult: op.mul,
    ast.Div: op.truediv,
    ast.Mod: op.mod,
    ast.Pow: op.pow,
    ast.USub: op.neg,
    ast.UAdd: op.pos,
    ast.FloorDiv: op.floordiv,
}
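
# Parses the expression with ast.parse(mode="eval") and evaluates it by walking
# the tree, so arbitrary code never reaches eval()/exec().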
def evaluate_math_expression(expr: str) -> str:
    try:
        node = ast.parse(expr, mode="eval")

        def _eval(node):
            if isinstance(node, ast.Expression):
                return _eval(node.body)
            elif isinstance(node, ast.Constant):
                return node.value
            elif isinstance(node, ast.BinOp):
                return OPERATORS[type(node.op)](_eval(node.left), _eval(node.right))
            elif isinstance(node, ast.UnaryOp):
                return OPERATORS[type(node.op)](_eval(node.operand))
            else:
                raise ValueError(f"Unsupported expression: {ast.dump(node)}")

        return str(_eval(node))
    except Exception as e:
        return f"Error evaluating expression: {e}"
validator = QuestionValidation(llm)

validate_tool = FunctionTool.from_defaults(
    fn=validator.validate_question_only,
    name="validate_question",
    description="Compares the similarity between two questions.",
)

search_tool = FunctionTool.from_defaults(
    fn=search_web,
    name="search_web",
    description="Searches the web using DuckDuckGo and returns results.",
)

math_tool = FunctionTool.from_defaults(
    fn=evaluate_math_expression,
    name="math_tool",
    description="Evaluates a basic Python math expression.",
)

TOOLS = [validate_tool, search_tool, math_tool]
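
# ReAct agent: interleaves reasoning steps with tool calls; max_iterations=3
# caps the think/act loop so the small model cannot spin indefinitely.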
agent = ReActAgent.from_tools(
    tools=TOOLS,
    llm=llm,
    verbose=True,
    max_iterations=3,
)
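
# Gradio callback: ChatInterface passes the user message plus the visible chat
# history; the history argument is unused here because the agent keeps its own memory.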
def respond(message: str, history: List[List[str]]) -> str:
    response = agent.chat(message)
    return response.response
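
# Minimal UI: a single ChatInterface hosted inside a Blocks container.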
with gr.Blocks() as app:
    gr.ChatInterface(
        respond,
        chatbot=gr.Chatbot(),
        title="Agent",
        description="Ask me anything: math, web search, or question-similarity validation.",
    )

if __name__ == "__main__":
    app.launch()