How to test the model myself?
#1 by Thisusernamealreadyexists00
I have tried this code in Colab:
!pip install -q transformers accelerate torchdiffeq

from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_name = "silx-ai/TARS-1B"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,     # required for the custom LNN classes
    torch_dtype=torch.float32,  # config says the weights are F32
    device_map=None,            # device_map=None avoids a loading error with this checkpoint
)
if torch.cuda.is_available():
    model = model.to("cuda")

prompt = "Hi. How are you?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    max_new_tokens=100,
    do_sample=True,
    temperature=1.2,        # more randomness
    top_k=50,               # wider token sampling
    top_p=0.95,
    repetition_penalty=1.2,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
# output: Hi. How are you?????????????????????????????????????????????????????????????????????????????????????????????????????
The model just keeps repeating the last character of the prompt; I don't know what's going on.
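
One way to narrow this down (a sketch, not a confirmed fix): generate greedily to rule out the sampling settings, and run the prompt through the model's chat template if it defines one. Whether TARS-1B actually ships a chat template is an assumption here; the snippet falls back to the raw prompt if it doesn't.

# Diagnostic sketch: reuses `model` and `tokenizer` from the snippet above.
# Assumption: TARS-1B may or may not define a chat template, so we check first.
prompt = "Hi. How are you?"
if tokenizer.chat_template is not None:
    # Instruction-tuned checkpoints usually expect their chat format;
    # feeding them raw text can produce degenerate output.
    text = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        tokenize=False,
        add_generation_prompt=True,
    )
else:
    text = prompt

inputs = tokenizer(text, return_tensors="pt").to(model.device)

# Greedy decoding removes sampling as a variable: if this still emits a run
# of "?", the problem is the model/weights, not temperature/top_k/top_p.
outputs = model.generate(**inputs, max_new_tokens=50, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

If greedy decoding also degenerates, that would point at the checkpoint itself rather than the generation parameters.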