Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import os
+import spaces
 
 MODEL_NAME = "swiss-ai/Apertus-8B-Instruct-2509"
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -11,6 +12,7 @@ HF_TOKEN = os.getenv("HF_TOKEN")
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
 model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HF_TOKEN).to(device)
 
+@spaces.GPU
 def predict(message, history):
     messages = []
 
@@ -53,4 +55,4 @@ chatbot = gr.ChatInterface(
 )
 
 # Launch the app
-chatbot.launch()
+chatbot.launch(share=True, server_port=80, enable_queue=True)
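For context: `import spaces` pulls in Hugging Face's ZeroGPU helper package, and `@spaces.GPU` marks `predict` as the function that needs a GPU, so a device is attached only while a call is running and released afterwards. A minimal sketch of the pattern, with an illustrative toy function and an assumed `duration` value rather than anything from app.py:

import spaces
import torch

# Outside a ZeroGPU Space the decorator is a no-op, so the same file still
# runs locally. The duration argument (seconds of GPU time per call) and the
# toy function below are illustrative.
@spaces.GPU(duration=120)
def square_on_gpu(x: float) -> float:
    t = torch.tensor([x], device="cuda" if torch.cuda.is_available() else "cpu")
    return (t * t).item()

print(square_on_gpu(3.0))  # 9.0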
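The diff elides the body of `predict`; a plausible shape for it, given the tokenizer and model set up above, is the standard chat-template flow. Everything below (the tuple-style history format, `max_new_tokens`, the decoding step) is an assumption, not the actual app.py code:

# Hypothetical reconstruction of the elided predict() body, assuming the
# tuple-style history that gr.ChatInterface passes by default and the
# module-level tokenizer/model/device defined in app.py.
@spaces.GPU
def predict(message, history):
    messages = []
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Format the conversation with the model's chat template and generate.
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(device)
    output_ids = model.generate(input_ids, max_new_tokens=512)

    # Return only the newly generated portion, without special tokens.
    return tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)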
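One caveat on the new launch line: `enable_queue` is a Gradio 3 argument that was removed in Gradio 4 (queueing is on by default there), and on Spaces `share=True` is ignored while the container is expected to serve on the platform's default port (7860), so pinning `server_port=80` can keep the Space from booting. Assuming a current Gradio version, the bare call is the safer choice:

# Queueing is on by default in Gradio 4+, and Spaces supplies its own host
# and port configuration, so no arguments are needed here.
chatbot.launch()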