# app.py (formerly bughunter_langgraph_gradio.py): BugHunter, an AI visual debugger built with LangGraph + Gradio
import io
import os
import re
import sys
import traceback  # needed to surface full tracebacks from exec()'d code
from typing import Optional, TypedDict

import cv2
import easyocr
import gradio as gr
import numpy as np
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langgraph.graph import StateGraph, END
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "web_Scrapping_agent21"
os.environ["LANGCHAIN_API_KEY"] = "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"
# ---------------- CONFIGURATION ----------------
MODEL_NAME = "Qwen/Qwen2.5-Coder-1.5B-Instruct"

# Load the code-tuned LLM once at startup (weights download on first run).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype="auto")
device = 0 if torch.cuda.is_available() else -1  # CUDA device index, or -1 for CPU
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)

# EasyOCR reader used to pull code text out of screenshots.
reader = easyocr.Reader(['en'], gpu=False)
# ---------------- STATE ----------------
class DebuggerState(TypedDict, total=False):
    """Shared state flowing through the LangGraph pipeline."""
    image: Optional[Image.Image]
    code_text: Optional[str]
    extracted_code: Optional[str]
    ai_response: Optional[str]
    frame: Optional[np.ndarray]
    stdout: Optional[str]
    stderr: Optional[str]
    error_lines: Optional[list]
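
# LangGraph merges the dict each node returns back into this shared state,
# so the nodes below mutate `state` and return it for downstream nodes.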

# ---------------- NODES ----------------
def extract_code_from_image(state: DebuggerState):
    """Extract code text from an uploaded screenshot via OCR."""
    if not state.get("image"):
        return state
    image = state["image"]
    # Keep a BGR copy for the OpenCV drawing done in highlight_code_errors.
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    # readtext() yields (bbox, text, confidence) tuples; keep the text parts.
    result = reader.readtext(np.array(image))
    code_text = "\n".join([res[1] for res in result])
    state["extracted_code"] = code_text
    state["frame"] = frame
    return state

def run_code_locally(state: DebuggerState):
    """Execute user or OCR'd code locally and capture its output."""
    code = state.get("code_text") or state.get("extracted_code", "")
    if not code.strip():
        state["stderr"] = ":warning: No code provided."
        return state
    stdout_buf, stderr_buf = io.StringIO(), io.StringIO()
    sys_stdout, sys_stderr = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = stdout_buf, stderr_buf
        exec(code, {})  # runs untrusted code in-process; see the note below
    except Exception:
        # Emit the full traceback, not just the message, so the
        # 'File "<string>", line N' markers survive for extract_error_lines.
        traceback.print_exc(file=sys.stderr)
    finally:
        sys.stdout, sys.stderr = sys_stdout, sys_stderr
    state["stdout"], state["stderr"] = stdout_buf.getvalue(), stderr_buf.getvalue()
    return state
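
# A safer variant (a sketch only, not wired into the graph) would isolate the
# snippet in a subprocess with a timeout instead of exec()-ing it in-process;
# `python -c` tracebacks carry the same 'File "<string>", line N' form:
#
#   import subprocess
#   proc = subprocess.run([sys.executable, "-c", code],
#                         capture_output=True, text=True, timeout=10)
#   stdout, stderr = proc.stdout, proc.stderr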

def extract_error_lines(state: DebuggerState):
    """Pull failing line numbers out of the captured stderr."""
    stderr = state.get("stderr", "")
    lines = []
    # exec()'d code shows up in tracebacks as: File "<string>", line N, in <module>
    for match in re.finditer(r'File "<string>", line (\d+)', stderr):
        lines.append(int(match.group(1)))
    state["error_lines"] = lines
    return state

def ask_ai_debugger(state: DebuggerState):
    """Ask the Qwen model to analyze and fix the code."""
    code = state.get("code_text") or state.get("extracted_code", "")
    error_output = state.get("stderr", "") or state.get("stdout", "")
    prompt = f"""
You are an expert AI code debugger.
Analyze the following code and any error output:
1. Identify what went wrong and explain why.
2. Provide a corrected version of the code.
3. Explain why the fix works.
Language: python
User Code:
{code}
"""
    if error_output:
        prompt += f"\nError Output:\n```\n{error_output}\n```"
    try:
        # return_full_text=False keeps the echoed prompt out of the reply.
        response = generator(prompt, max_new_tokens=500, do_sample=True,
                             return_full_text=False)[0]['generated_text']
    except Exception as e:
        response = f":warning: Model inference failed: {e}"
    state["ai_response"] = response
    return state
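
# Instruct-tuned checkpoints usually respond better when the chat template is
# applied instead of a raw string prompt; a sketch of that variant:
#
#   messages = [{"role": "user", "content": prompt}]
#   chat_prompt = tokenizer.apply_chat_template(
#       messages, tokenize=False, add_generation_prompt=True)
#   response = generator(chat_prompt, max_new_tokens=500,
#                        return_full_text=False)[0]["generated_text"]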

def highlight_code_errors(state: DebuggerState):
    """Draw per-line boxes on the screenshot: red = error, green = success."""
    frame = state.get("frame")
    code = state.get("extracted_code") or state.get("code_text")
    if frame is None or not code:
        return state
    code_lines = code.splitlines()
    num_lines = len(code_lines)
    # Approximate each code line as an equal-height horizontal band of the image.
    line_height = max(frame.shape[0] // max(num_lines, 1), 20)
    error_lines = state.get("error_lines", [])
    stderr = state.get("stderr", "")
    for i, line in enumerate(code_lines):
        y1 = i * line_height
        y2 = y1 + line_height
        if (i + 1) in error_lines:
            color = (0, 0, 255)  # red (BGR) for lines named in the traceback
        elif stderr == "" and line.strip():
            color = (0, 255, 0)  # green for a clean run
        else:
            color = (200, 200, 200)  # gray for neutral/blank lines
        cv2.rectangle(frame, (0, y1), (frame.shape[1], y2), color, 2)
    state["frame"] = frame
    return state
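
# The uniform-band mapping above assumes OCR preserved line order and roughly
# even spacing. A tighter variant would reuse the bounding boxes easyocr
# already returns (the first element of each readtext() tuple) to place boxes.
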
# ---------------- GRAPH DEFINITION ----------------
graph = StateGraph(DebuggerState)
graph.add_node("extract_code_from_image", extract_code_from_image)
graph.add_node("run_code_locally", run_code_locally)
graph.add_node("extract_error_lines", extract_error_lines)
graph.add_node("ask_ai_debugger", ask_ai_debugger)
graph.add_node("highlight_code_errors", highlight_code_errors)
# Wire the nodes into a linear pipeline:
# image -> run code -> parse errors -> ask model -> highlight -> END
graph.add_edge("extract_code_from_image", "run_code_locally")
graph.add_edge("run_code_locally", "extract_error_lines")
graph.add_edge("extract_error_lines", "ask_ai_debugger")
graph.add_edge("ask_ai_debugger", "highlight_code_errors")
graph.add_edge("highlight_code_errors", END)
graph.set_entry_point("extract_code_from_image")
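
# Optional sanity check (assumes a langgraph version that exposes
# get_graph().draw_mermaid()); prints a Mermaid diagram of the wiring:
#
#   print(graph.compile().get_graph().draw_mermaid())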

# ---------------- GRADIO FUNCTION ----------------
# Compile once at import time; recompiling on every request repeats work.
app = graph.compile()

def process_with_langgraph(image: Optional[Image.Image], code_text: Optional[str]):
    input_state = {
        "image": image,
        "code_text": code_text,
    }
    # Run the whole pipeline and read the final state.
    final_state = app.invoke(input_state)
    frame = final_state.get("frame")
    if frame is not None:
        # OpenCV drew in BGR; Gradio's numpy image output expects RGB.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    ai_response = final_state.get("ai_response", ":warning: No response.")
    return frame, ai_response
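
# Example (no UI): without a screenshot the frame is None and the second
# return value holds the model's analysis, e.g.:
#
#   _, analysis = process_with_langgraph(None, "print(1/0)")
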
# ---------------- GRADIO UI ----------------
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # 🧠 BugHunter LangGraph AI
        ### AI-powered Visual Debugger built with LangGraph + Qwen
        Upload a code screenshot or paste your code to get instant analysis and fixes.
        """
    )
    with gr.Row():
        img_input = gr.Image(label="📸 Upload Code Screenshot", type="pil")
        code_input = gr.Textbox(label="💻 Or Paste Your Code", lines=15,
                                placeholder="Paste your Python code here...")
    with gr.Row():
        img_output = gr.Image(label="🔍 Highlighted Code", type="numpy")
        ai_output = gr.Textbox(label="🤖 AI Debugger Suggestion", lines=12)
    btn = gr.Button("🚀 Analyze Code")
    btn.click(fn=process_with_langgraph, inputs=[img_input, code_input],
              outputs=[img_output, ai_output])

demo.launch(share=True)