import torch
from transformers import pipeline
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection
import numpy as np
import re
import draw_utils  # local helper module providing visualize_detections()
import spaces

from huggingface_hub import login
import os

# Debug aid: print the names (not the values) of the available environment variables.
for variable_name in os.environ.keys():
    print(variable_name)

# Authenticate with the Hugging Face Hub so the gated Gemma weights can be downloaded.
login(token=os.environ.get('gemma_access_token'))


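# Run on the GPU when one is available, otherwise fall back to CPU.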
device = 'cuda' if torch.cuda.is_available() else 'cpu'


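# Gemma 3 vision-language pipeline, used to name the objects visible in the input image.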
pipe = pipeline(
    "image-text-to-text",
    model="google/gemma-3-4b-it",
    device_map=device,
    torch_dtype=torch.bfloat16,
)


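# Grounding DINO zero-shot object detector, used to localise the objects named by Gemma.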
model_id = "IDEA-Research/grounding-dino-tiny"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)

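# Handle one image: ask Gemma for the object names, detect them with Grounding DINO,
# and return the image with the detections drawn on it.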
@spaces.GPU
def laod_gdino(image):
    # Prompt Gemma for a plain, comma-separated list of object names.
    messages = [
        {
            "role": "system",
            "content": [{"type": "text", "text": "Just give the list of objects in the given picture, separated by commas. Do not write anything else."}],
        },
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "List the objects that you see in the given picture."},
                {"type": "image", "url": image},
            ],
        },
    ]


    output = pipe(text=messages, max_new_tokens=500)
    # The last turn of the generated conversation holds Gemma's reply.
    llm_response = output[0]["generated_text"][-1]["content"]
    print(llm_response)

    # Normalise person-related words to "person"; whole-word matching avoids
    # mangling e.g. "woman" the way a plain substring replace of "man" would.
    llm_response = llm_response.lower()
    for synonym in ('pedestrian', 'people', 'woman', 'man'):
        llm_response = re.sub(rf'\b{synonym}\b', 'person', llm_response)

    # Split the comma-separated reply into individual label strings.
    llm_labels = [label.strip() for label in llm_response.split(',') if label.strip()]
    print(llm_labels)

    # The Grounding DINO processor expects one list of labels per image.
    llm_labels = [llm_labels]

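    # Encode the image together with the label list and run the detector without gradients.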
    inputs = processor(images=image, text=llm_labels, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model(**inputs)

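    # Keep detections above the box/text confidence thresholds, rescaled to the original
    # image size (PIL gives (width, height); the processor wants (height, width)).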
    results = processor.post_process_grounded_object_detection(
        outputs,
        inputs.input_ids,
        threshold=0.4,
        text_threshold=0.3,
        target_sizes=[image.size[::-1]]
    )

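    # Convert to a NumPy array and draw the boxes, scores and labels on the image.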
    result = results[0]
    image = np.array(image)

    draw_results = [result["boxes"], result["scores"], result["labels"]]
    return draw_utils.visualize_detections(image, draw_results)