Update app.py
app.py CHANGED
@@ -29,9 +29,12 @@ async def generate(request: Request):
     if not prompt:
         return {"error": "Thiếu prompt!"}
 
+    # Add the answering rules to the prompt
+    final_prompt = f"Trả lời bằng tiếng Việt, ngắn gọn: {data.prompt}"
+
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     outputs = model.generate(**inputs,
-        max_new_tokens=50,temperature=0.
+        max_new_tokens=50, temperature=0.5, top_p=0.9, repetition_penalty=1.2, do_sample=False, eos_token_id=tokenizer.eos_token_id
     )
     result = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return {"response": result}
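For context, below is a minimal sketch of the full endpoint with the hunk above applied. The FastAPI app setup, model/tokenizer loading, route path, and request-body handling are not shown in this diff, so those parts are assumptions. Note that the committed hunk builds final_prompt but the tokenizer call still uses the raw prompt; the sketch passes final_prompt instead so the added instruction actually reaches the model.

# Minimal sketch only: names outside the diff (model id, route path, request parsing) are assumptions.
from fastapi import FastAPI, Request
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "your-model-id"  # assumption: the Space's actual checkpoint is not shown in the diff
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

app = FastAPI()

@app.post("/generate")  # assumption: route path not shown in the diff
async def generate(request: Request):
    data = await request.json()
    prompt = data.get("prompt", "")
    if not prompt:
        return {"error": "Thiếu prompt!"}  # "Missing prompt!"

    # Prepend the answering rules ("Answer in Vietnamese, briefly:")
    final_prompt = f"Trả lời bằng tiếng Việt, ngắn gọn: {prompt}"

    # Tokenize final_prompt (not the raw prompt) so the added instruction reaches the model
    inputs = tokenizer(final_prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=50,
        temperature=0.5,        # no effect while do_sample=False
        top_p=0.9,              # no effect while do_sample=False
        repetition_penalty=1.2,
        do_sample=False,        # greedy decoding; set True to enable sampling
        eos_token_id=tokenizer.eos_token_id,
    )
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"response": result}

Because do_sample=False selects greedy decoding, transformers ignores temperature and top_p for this call; they only take effect if do_sample=True is set.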