sam2ai committed · verified
Commit eaadaf2 · Parent(s): 6b6f47e

Update app.py: enable the ZeroGPU @spaces.GPU decorator (and the matching import spaces) and remove a leftover debug print.

Files changed (1): app.py (+2, -4)
app.py CHANGED
@@ -2,7 +2,7 @@ from threading import Thread
 from typing import Dict
 
 import gradio as gr
-# import spaces
+import spaces
 import torch
 from PIL import Image
 from transformers import AutoModelForVision2Seq, AutoProcessor, AutoTokenizer, TextIteratorStreamer
@@ -28,7 +28,7 @@ processor = AutoProcessor.from_pretrained(model_id)
 model = AutoModelForVision2Seq.from_pretrained(model_id, torch_dtype="auto", device_map="auto")
 
 
-# @spaces.GPU
+@spaces.GPU
 def stream_chat(message: Dict[str, str], history: list):
     # Turn 1:
     # {'text': 'what is this', 'files': ['image-xxx.jpg']}
@@ -39,7 +39,6 @@ def stream_chat(message: Dict[str, str], history: list):
     # [[('image-xxx.jpg',), None], ['what is this', 'a image.']]
 
     image_path = None
-    # print(message.files[0].path)
     if len(message.files) != 0:
         image_path = message.files[0].path
 
@@ -101,4 +100,3 @@ with gr.Blocks(css=CSS) as demo:
 
 if __name__ == "__main__":
     demo.launch()
-
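
What the change does: the previously commented-out import spaces and @spaces.GPU lines are now live, and a leftover debug print is dropped. On Hugging Face ZeroGPU Spaces, a function that needs CUDA has to be wrapped in @spaces.GPU so the runtime attaches a GPU for the duration of each call. The snippet below is a minimal sketch of that pattern, not this Space's actual model code: it assumes the app runs on ZeroGPU hardware where the spaces package is pre-installed, and gpu_step is a hypothetical stand-in for stream_chat.

import spaces
import torch

@spaces.GPU  # ZeroGPU attaches a GPU only while this call is running
def gpu_step(x: float) -> float:
    # Tensor work placed on "cuda" inside the decorated function runs on
    # the GPU that ZeroGPU allocates for this call.
    t = torch.full((3,), x, device="cuda")
    return (t * 2).sum().item()

On hardware other than ZeroGPU the decorator is expected to be a no-op, so keeping it should not break a regular GPU or CPU Space.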