import gradio as gr
from laod_pipeline import laod_gdino

from huggingface_hub import login
import os

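# Debug aid: print the names (not the values) of the environment variables
# available in the runtime, presumably to confirm the expected token secret is set.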
for variable_name in os.environ.keys():
    print(variable_name)


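# Authenticate with the Hugging Face Hub using the token stored in the
# 'gemma_access_token' Space secret, presumably required to download gated
# model weights (e.g. Gemma) used by the LAOD pipeline.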
login(token=os.environ.get('gemma_access_token'))



examples = [['images/1.jpg'], ['images/2.jpg'], ['images/3.jpg']]
title = "LAOD: LLM-Guided Agentic Object Detection for Open-World Understanding"

# --- HTML/CSS for Centered Horizontal Buttons ---
# We use a div with Flexbox to center the buttons and add a gap between them.
description = """
<div style="display: flex; justify-content: center; align-items: center; text-align: center; gap: 15px;">
  <p style="margin: 0;">For more details:</p>
  <a href="https://github.com/furkanmumcu/LAOD" target="_blank">
    <img src="https://img.shields.io/badge/GitHub-Repo-blue?style=for-the-badge&logo=github" alt="GitHub Repo">
  </a>
  <a href="https://arxiv.org/abs/2507.10844" target="_blank">
    <img src="https://img.shields.io/badge/arXiv-Paper-b31b1b?style=for-the-badge&logo=arxiv" alt="arXiv Paper">
  </a>
</div>
"""

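# Build the Gradio UI: a single image input, an annotated image output,
# plus the example images, title, and HTML description defined above.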
demo = gr.Interface(
    fn=laod_gdino,
    inputs=gr.Image(label="Upload an Image", type="pil"),
    outputs=gr.Image(label="Output"),
    examples=examples,
    title=title,
    description=description
)
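
# Assumed contract (inferred from the Interface config above, not from laod_pipeline.py):
# laod_gdino takes a PIL image and returns a PIL image annotated with detections, roughly:
#
#   def laod_gdino(image):   # PIL.Image.Image -> PIL.Image.Image
#       ...                   # LLM-guided labels + Grounding DINO, boxes drawn on the image
#       return annotated_image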

# Launch the app (on Hugging Face Spaces this serves the hosted demo).
demo.launch()