Update app.py
app.py CHANGED

@@ -3,15 +3,29 @@ import requests
 import os
 
 HF_API_KEY = os.getenv('TOKEN')
-MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"
+MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"
 
 def chat(prompt):
+    # Make the API request
     response = requests.post(
         f"https://api-inference.huggingface.co/models/{MODEL_NAME}",
         headers={"Authorization": f"Bearer {HF_API_KEY}"},
         json={"inputs": prompt}
     )
-
+
+    # Debug: print the raw response to see the actual structure
+    print(response.json())  # This will help you inspect the returned data
 
-
+    # Safe check: return the response if it's structured as expected
+    try:
+        # Try accessing the 'generated_text' field, assuming a list-like response
+        generated_text = response.json()[0]["generated_text"]
+        return generated_text
+    except (KeyError, IndexError) as e:
+        # Handle the case where the structure is different
+        print(f"Error: {e}")
+        return f"Error processing response: {response.json()}"
+
+# Set up Gradio interface
+iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="DeepSeek 7B Chatbot (API)")
 iface.launch()
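For reference, below is a minimal sketch of the full app.py as it stands after this commit. The diff hunk starts at line 3, so the first two lines are assumptions: import gradio as gr is inferred from the use of gr.Interface, and import requests is suggested by the hunk header. The Space is also assumed to define a secret named TOKEN holding a Hugging Face API token, since the code reads it with os.getenv('TOKEN'). Everything else mirrors the new version of the file shown in the diff.

# Sketch of the assembled app.py after this commit (the diff only shows lines 3 onward).
import gradio as gr   # assumed: not visible in the hunk, but gr.Interface is used below
import requests       # suggested by the hunk header context
import os

HF_API_KEY = os.getenv('TOKEN')  # requires a Space secret named TOKEN
MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"

def chat(prompt):
    # Make the API request
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{MODEL_NAME}",
        headers={"Authorization": f"Bearer {HF_API_KEY}"},
        json={"inputs": prompt}
    )

    # Debug: print the raw response to see the actual structure
    print(response.json())  # This will help you inspect the returned data

    # Safe check: return the response if it's structured as expected
    try:
        # Try accessing the 'generated_text' field, assuming a list-like response
        generated_text = response.json()[0]["generated_text"]
        return generated_text
    except (KeyError, IndexError) as e:
        # Handle the case where the structure is different
        print(f"Error: {e}")
        return f"Error processing response: {response.json()}"

# Set up Gradio interface
iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="DeepSeek 7B Chatbot (API)")
iface.launch()

On a successful call, the Inference API for a text-generation model typically returns a list of the form [{"generated_text": "..."}], which is exactly what the try block indexes into; any other payload (for example an error object returned while the model is still loading) raises KeyError or IndexError and falls through to the except branch, so the raw response is surfaced in the chat output instead of crashing the app.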