Update README.md
README.md

@@ -160,8 +160,6 @@ def get_current_weather(location: str, format: str):
     """
     pass
 
-test_chat.append()
-
 conversation = [{"role": "user", "content": "What's the weather like in Paris?"}]
 tools = [get_current_weather]
 
@@ -172,10 +170,11 @@ tool_use_prompt = tokenizer.apply_chat_template(
     tokenize=False,
     add_generation_prompt=True,
 )
-model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
 
 inputs = tokenizer(tool_use_prompt, return_tensors="pt")
 
+model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
+
 outputs = model.generate(**inputs, max_new_tokens=1000)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 ```
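For readers who want the end state rather than the hunks, here is a sketch of the full tool-use example as it reads after this commit. Everything outside the two hunks is an assumption: the imports, the `model_id` value, the tokenizer setup, the docstring body, and the first two `apply_chat_template()` arguments are reconstructed from the hunk headers, not shown in the diff.

```python
# Sketch of the README's tool-use example after this commit.
# ASSUMPTIONS: imports, model_id, tokenizer setup, docstring body, and the
# conversation/tools arguments to apply_chat_template() are reconstructed
# from the hunk headers; only the lines shown in the diff are verbatim.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-model-id"  # hypothetical: the real checkpoint name is outside the diff

tokenizer = AutoTokenizer.from_pretrained(model_id)

def get_current_weather(location: str, format: str):
    """
    Stub tool; the diff elides the docstring body that documents
    `location` and `format` for the tool schema.
    """
    pass

conversation = [{"role": "user", "content": "What's the weather like in Paris?"}]
tools = [get_current_weather]

# Render the conversation plus the tool schema into a prompt string.
tool_use_prompt = tokenizer.apply_chat_template(
    conversation,
    tools=tools,
    tokenize=False,
    add_generation_prompt=True,
)

inputs = tokenizer(tool_use_prompt, return_tensors="pt")

# The commit moves model loading here, after tokenization, and drops the
# stray test_chat.append() left over from earlier edits.
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

# Note: with device_map="auto", the inputs may need inputs.to(model.device)
# before generate(); shown here exactly as in the diff.
outputs = model.generate(**inputs, max_new_tokens=1000)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```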