narySt committed (verified)
Commit: dbcb78f
Parent: 2a93bf8

Model weights added


Uploaded via the Python API.

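For reference, a minimal sketch of such an upload with the `huggingface_hub` Python client is shown below. The destination `repo_id` is a placeholder and the exact call used for this commit is not recorded here; only the `just_lora_weights` folder name is taken from the commit itself.

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes an access token is already configured (HF_TOKEN or `huggingface-cli login`)
api.upload_folder(
    folder_path="just_lora_weights",   # local directory holding the LoRA checkpoints
    repo_id="<user>/<model-repo>",     # placeholder: the destination model repository
    repo_type="model",
    commit_message="Model weights added",
)
```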
This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +40 -0
  2. just_lora_weights/checkpoint-100/README.md +202 -0
  3. just_lora_weights/checkpoint-100/adapter_config.json +34 -0
  4. just_lora_weights/checkpoint-100/adapter_model.safetensors +3 -0
  5. just_lora_weights/checkpoint-100/added_tokens.json +28 -0
  6. just_lora_weights/checkpoint-100/chat_template.jinja +89 -0
  7. just_lora_weights/checkpoint-100/merges.txt +0 -0
  8. just_lora_weights/checkpoint-100/optimizer.pt +3 -0
  9. just_lora_weights/checkpoint-100/rng_state.pth +3 -0
  10. just_lora_weights/checkpoint-100/scheduler.pt +3 -0
  11. just_lora_weights/checkpoint-100/special_tokens_map.json +31 -0
  12. just_lora_weights/checkpoint-100/tokenizer.json +3 -0
  13. just_lora_weights/checkpoint-100/tokenizer_config.json +239 -0
  14. just_lora_weights/checkpoint-100/trainer_state.json +174 -0
  15. just_lora_weights/checkpoint-100/training_args.bin +3 -0
  16. just_lora_weights/checkpoint-100/vocab.json +0 -0
  17. just_lora_weights/checkpoint-1000/README.md +202 -0
  18. just_lora_weights/checkpoint-1000/adapter_config.json +34 -0
  19. just_lora_weights/checkpoint-1000/adapter_model.safetensors +3 -0
  20. just_lora_weights/checkpoint-1000/added_tokens.json +28 -0
  21. just_lora_weights/checkpoint-1000/chat_template.jinja +89 -0
  22. just_lora_weights/checkpoint-1000/merges.txt +0 -0
  23. just_lora_weights/checkpoint-1000/optimizer.pt +3 -0
  24. just_lora_weights/checkpoint-1000/rng_state.pth +3 -0
  25. just_lora_weights/checkpoint-1000/scheduler.pt +3 -0
  26. just_lora_weights/checkpoint-1000/special_tokens_map.json +31 -0
  27. just_lora_weights/checkpoint-1000/tokenizer.json +3 -0
  28. just_lora_weights/checkpoint-1000/tokenizer_config.json +239 -0
  29. just_lora_weights/checkpoint-1000/trainer_state.json +1434 -0
  30. just_lora_weights/checkpoint-1000/training_args.bin +3 -0
  31. just_lora_weights/checkpoint-1000/vocab.json +0 -0
  32. just_lora_weights/checkpoint-125/README.md +202 -0
  33. just_lora_weights/checkpoint-125/adapter_config.json +34 -0
  34. just_lora_weights/checkpoint-125/adapter_model.safetensors +3 -0
  35. just_lora_weights/checkpoint-125/added_tokens.json +28 -0
  36. just_lora_weights/checkpoint-125/chat_template.jinja +89 -0
  37. just_lora_weights/checkpoint-125/merges.txt +0 -0
  38. just_lora_weights/checkpoint-125/optimizer.pt +3 -0
  39. just_lora_weights/checkpoint-125/rng_state.pth +3 -0
  40. just_lora_weights/checkpoint-125/scheduler.pt +3 -0
  41. just_lora_weights/checkpoint-125/special_tokens_map.json +31 -0
  42. just_lora_weights/checkpoint-125/tokenizer.json +3 -0
  43. just_lora_weights/checkpoint-125/tokenizer_config.json +239 -0
  44. just_lora_weights/checkpoint-125/trainer_state.json +209 -0
  45. just_lora_weights/checkpoint-125/training_args.bin +3 -0
  46. just_lora_weights/checkpoint-125/vocab.json +0 -0
  47. just_lora_weights/checkpoint-150/README.md +202 -0
  48. just_lora_weights/checkpoint-150/adapter_config.json +34 -0
  49. just_lora_weights/checkpoint-150/adapter_model.safetensors +3 -0
  50. just_lora_weights/checkpoint-150/added_tokens.json +28 -0
.gitattributes CHANGED
@@ -46,3 +46,43 @@ lora_weights/checkpoint-700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
46
  lora_weights/checkpoint-800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
47
  lora_weights/checkpoint-900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
48
  lora_weights/tokenizer.json filter=lfs diff=lfs merge=lfs -text
49
+ just_lora_weights/checkpoint-100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
50
+ just_lora_weights/checkpoint-1000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
51
+ just_lora_weights/checkpoint-125/tokenizer.json filter=lfs diff=lfs merge=lfs -text
52
+ just_lora_weights/checkpoint-150/tokenizer.json filter=lfs diff=lfs merge=lfs -text
53
+ just_lora_weights/checkpoint-175/tokenizer.json filter=lfs diff=lfs merge=lfs -text
54
+ just_lora_weights/checkpoint-200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
55
+ just_lora_weights/checkpoint-225/tokenizer.json filter=lfs diff=lfs merge=lfs -text
56
+ just_lora_weights/checkpoint-25/tokenizer.json filter=lfs diff=lfs merge=lfs -text
57
+ just_lora_weights/checkpoint-250/tokenizer.json filter=lfs diff=lfs merge=lfs -text
58
+ just_lora_weights/checkpoint-275/tokenizer.json filter=lfs diff=lfs merge=lfs -text
59
+ just_lora_weights/checkpoint-300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
60
+ just_lora_weights/checkpoint-325/tokenizer.json filter=lfs diff=lfs merge=lfs -text
61
+ just_lora_weights/checkpoint-350/tokenizer.json filter=lfs diff=lfs merge=lfs -text
62
+ just_lora_weights/checkpoint-375/tokenizer.json filter=lfs diff=lfs merge=lfs -text
63
+ just_lora_weights/checkpoint-400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
64
+ just_lora_weights/checkpoint-425/tokenizer.json filter=lfs diff=lfs merge=lfs -text
65
+ just_lora_weights/checkpoint-450/tokenizer.json filter=lfs diff=lfs merge=lfs -text
66
+ just_lora_weights/checkpoint-475/tokenizer.json filter=lfs diff=lfs merge=lfs -text
67
+ just_lora_weights/checkpoint-50/tokenizer.json filter=lfs diff=lfs merge=lfs -text
68
+ just_lora_weights/checkpoint-500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
69
+ just_lora_weights/checkpoint-525/tokenizer.json filter=lfs diff=lfs merge=lfs -text
70
+ just_lora_weights/checkpoint-550/tokenizer.json filter=lfs diff=lfs merge=lfs -text
71
+ just_lora_weights/checkpoint-575/tokenizer.json filter=lfs diff=lfs merge=lfs -text
72
+ just_lora_weights/checkpoint-600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
73
+ just_lora_weights/checkpoint-625/tokenizer.json filter=lfs diff=lfs merge=lfs -text
74
+ just_lora_weights/checkpoint-650/tokenizer.json filter=lfs diff=lfs merge=lfs -text
75
+ just_lora_weights/checkpoint-675/tokenizer.json filter=lfs diff=lfs merge=lfs -text
76
+ just_lora_weights/checkpoint-700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
77
+ just_lora_weights/checkpoint-725/tokenizer.json filter=lfs diff=lfs merge=lfs -text
78
+ just_lora_weights/checkpoint-75/tokenizer.json filter=lfs diff=lfs merge=lfs -text
79
+ just_lora_weights/checkpoint-750/tokenizer.json filter=lfs diff=lfs merge=lfs -text
80
+ just_lora_weights/checkpoint-775/tokenizer.json filter=lfs diff=lfs merge=lfs -text
81
+ just_lora_weights/checkpoint-800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
82
+ just_lora_weights/checkpoint-825/tokenizer.json filter=lfs diff=lfs merge=lfs -text
83
+ just_lora_weights/checkpoint-850/tokenizer.json filter=lfs diff=lfs merge=lfs -text
84
+ just_lora_weights/checkpoint-875/tokenizer.json filter=lfs diff=lfs merge=lfs -text
85
+ just_lora_weights/checkpoint-900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
86
+ just_lora_weights/checkpoint-925/tokenizer.json filter=lfs diff=lfs merge=lfs -text
87
+ just_lora_weights/checkpoint-950/tokenizer.json filter=lfs diff=lfs merge=lfs -text
88
+ just_lora_weights/checkpoint-975/tokenizer.json filter=lfs diff=lfs merge=lfs -text
just_lora_weights/checkpoint-100/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: Qwen/Qwen3-0.6B
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.15.2
just_lora_weights/checkpoint-100/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen3-0.6B",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 16,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.05,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "r": 16,
24
+ "rank_pattern": {},
25
+ "revision": null,
26
+ "target_modules": [
27
+ "v_proj",
28
+ "q_proj"
29
+ ],
30
+ "task_type": "CAUSAL_LM",
31
+ "trainable_token_indices": null,
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
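The adapter config above describes a LoRA adapter (r=16, lora_alpha=16, dropout 0.05) applied to the `q_proj` and `v_proj` projections of `Qwen/Qwen3-0.6B`. A minimal sketch of attaching one of these checkpoints with PEFT, assuming the repository has been downloaded or cloned locally:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

adapter_dir = "just_lora_weights/checkpoint-100"  # any checkpoint directory from this commit

# Load the frozen base model named in adapter_config.json, then attach the LoRA weights.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-0.6B")
model = PeftModel.from_pretrained(base, adapter_dir)
tokenizer = AutoTokenizer.from_pretrained(adapter_dir)

model.eval()
```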
just_lora_weights/checkpoint-100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ffef6c1ab5392c2105a0f2fe6f04a5ab071ee766fea9fd61c27a3df03eecd0f3
3
+ size 9189904
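The safetensors file itself is committed as a Git LFS pointer; the ~9.2 MB payload holds only the LoRA A/B matrices for the targeted projections. A small sketch for inspecting its contents once the real file is available locally (tensor names follow PEFT's internal layout, which is not spelled out in this commit):

```python
from safetensors import safe_open

path = "just_lora_weights/checkpoint-100/adapter_model.safetensors"

# Print every LoRA tensor and its shape; exact names depend on PEFT's naming scheme.
with safe_open(path, framework="pt") as f:
    for name in f.keys():
        print(name, tuple(f.get_tensor(name).shape))
```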
just_lora_weights/checkpoint-100/added_tokens.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "</think>": 151668,
3
+ "</tool_call>": 151658,
4
+ "</tool_response>": 151666,
5
+ "<think>": 151667,
6
+ "<tool_call>": 151657,
7
+ "<tool_response>": 151665,
8
+ "<|box_end|>": 151649,
9
+ "<|box_start|>": 151648,
10
+ "<|endoftext|>": 151643,
11
+ "<|file_sep|>": 151664,
12
+ "<|fim_middle|>": 151660,
13
+ "<|fim_pad|>": 151662,
14
+ "<|fim_prefix|>": 151659,
15
+ "<|fim_suffix|>": 151661,
16
+ "<|im_end|>": 151645,
17
+ "<|im_start|>": 151644,
18
+ "<|image_pad|>": 151655,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
just_lora_weights/checkpoint-100/chat_template.jinja ADDED
@@ -0,0 +1,89 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- endif %}
6
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
7
+ {%- for tool in tools %}
8
+ {{- "\n" }}
9
+ {{- tool | tojson }}
10
+ {%- endfor %}
11
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
12
+ {%- else %}
13
+ {%- if messages[0].role == 'system' %}
14
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
15
+ {%- endif %}
16
+ {%- endif %}
17
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
18
+ {%- for message in messages[::-1] %}
19
+ {%- set index = (messages|length - 1) - loop.index0 %}
20
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
21
+ {%- set ns.multi_step_tool = false %}
22
+ {%- set ns.last_query_index = index %}
23
+ {%- endif %}
24
+ {%- endfor %}
25
+ {%- for message in messages %}
26
+ {%- if message.content is string %}
27
+ {%- set content = message.content %}
28
+ {%- else %}
29
+ {%- set content = '' %}
30
+ {%- endif %}
31
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
32
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
33
+ {%- elif message.role == "assistant" %}
34
+ {%- set reasoning_content = '' %}
35
+ {%- if message.reasoning_content is string %}
36
+ {%- set reasoning_content = message.reasoning_content %}
37
+ {%- else %}
38
+ {%- if '</think>' in content %}
39
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
40
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
41
+ {%- endif %}
42
+ {%- endif %}
43
+ {%- if loop.index0 > ns.last_query_index %}
44
+ {%- if loop.last or (not loop.last and reasoning_content) %}
45
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
46
+ {%- else %}
47
+ {{- '<|im_start|>' + message.role + '\n' + content }}
48
+ {%- endif %}
49
+ {%- else %}
50
+ {{- '<|im_start|>' + message.role + '\n' + content }}
51
+ {%- endif %}
52
+ {%- if message.tool_calls %}
53
+ {%- for tool_call in message.tool_calls %}
54
+ {%- if (loop.first and content) or (not loop.first) %}
55
+ {{- '\n' }}
56
+ {%- endif %}
57
+ {%- if tool_call.function %}
58
+ {%- set tool_call = tool_call.function %}
59
+ {%- endif %}
60
+ {{- '<tool_call>\n{"name": "' }}
61
+ {{- tool_call.name }}
62
+ {{- '", "arguments": ' }}
63
+ {%- if tool_call.arguments is string %}
64
+ {{- tool_call.arguments }}
65
+ {%- else %}
66
+ {{- tool_call.arguments | tojson }}
67
+ {%- endif %}
68
+ {{- '}\n</tool_call>' }}
69
+ {%- endfor %}
70
+ {%- endif %}
71
+ {{- '<|im_end|>\n' }}
72
+ {%- elif message.role == "tool" %}
73
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
74
+ {{- '<|im_start|>user' }}
75
+ {%- endif %}
76
+ {{- '\n<tool_response>\n' }}
77
+ {{- content }}
78
+ {{- '\n</tool_response>' }}
79
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
80
+ {{- '<|im_end|>\n' }}
81
+ {%- endif %}
82
+ {%- endif %}
83
+ {%- endfor %}
84
+ {%- if add_generation_prompt %}
85
+ {{- '<|im_start|>assistant\n' }}
86
+ {%- if enable_thinking is defined and enable_thinking is false %}
87
+ {{- '<think>\n\n</think>\n\n' }}
88
+ {%- endif %}
89
+ {%- endif %}
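The Jinja template above is the Qwen3 ChatML-style chat format, including `<think>` handling and tool-call rendering. A minimal sketch of rendering a prompt with it through the checkpoint's tokenizer, assuming a transformers version recent enough to load `chat_template.jinja` and to forward extra keyword arguments such as `enable_thinking` into the template:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("just_lora_weights/checkpoint-100")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# enable_thinking=False makes the template emit the empty <think>\n\n</think> block
# defined in its final branch.
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,
)
print(prompt)
```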
just_lora_weights/checkpoint-100/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
just_lora_weights/checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a6011cf05831da10839186bbb6ae10d769f8923315a71a356e4471214ae636b
3
+ size 18441675
just_lora_weights/checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cddf27219365242ec1046a3532a63a24c3f350c77f100e4f973369db2cc849d
3
+ size 14455
just_lora_weights/checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6d464ddcd54c8f7496943158fe241c4fb8e43ccfac60bcb134909899ddfd40c
3
+ size 1465
just_lora_weights/checkpoint-100/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
just_lora_weights/checkpoint-100/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
3
+ size 11422654
just_lora_weights/checkpoint-100/tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 131072,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
just_lora_weights/checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,174 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 0.1,
6
+ "eval_steps": 500,
7
+ "global_step": 100,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.005,
14
+ "grad_norm": 4.4095964431762695,
15
+ "learning_rate": 4.9800000000000004e-05,
16
+ "loss": 5.4329,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.01,
21
+ "grad_norm": 2.658348798751831,
22
+ "learning_rate": 4.9550000000000005e-05,
23
+ "loss": 4.6734,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.015,
28
+ "grad_norm": 3.4242470264434814,
29
+ "learning_rate": 4.93e-05,
30
+ "loss": 4.8327,
31
+ "step": 15
32
+ },
33
+ {
34
+ "epoch": 0.02,
35
+ "grad_norm": 4.363213539123535,
36
+ "learning_rate": 4.905e-05,
37
+ "loss": 4.4238,
38
+ "step": 20
39
+ },
40
+ {
41
+ "epoch": 0.025,
42
+ "grad_norm": 2.531619071960449,
43
+ "learning_rate": 4.88e-05,
44
+ "loss": 3.9284,
45
+ "step": 25
46
+ },
47
+ {
48
+ "epoch": 0.03,
49
+ "grad_norm": 4.686695098876953,
50
+ "learning_rate": 4.855e-05,
51
+ "loss": 4.6794,
52
+ "step": 30
53
+ },
54
+ {
55
+ "epoch": 0.035,
56
+ "grad_norm": 4.860099792480469,
57
+ "learning_rate": 4.83e-05,
58
+ "loss": 5.0624,
59
+ "step": 35
60
+ },
61
+ {
62
+ "epoch": 0.04,
63
+ "grad_norm": 8.296855926513672,
64
+ "learning_rate": 4.805e-05,
65
+ "loss": 4.8011,
66
+ "step": 40
67
+ },
68
+ {
69
+ "epoch": 0.045,
70
+ "grad_norm": 4.80878210067749,
71
+ "learning_rate": 4.78e-05,
72
+ "loss": 5.495,
73
+ "step": 45
74
+ },
75
+ {
76
+ "epoch": 0.05,
77
+ "grad_norm": 4.766687393188477,
78
+ "learning_rate": 4.755e-05,
79
+ "loss": 4.6778,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 0.055,
84
+ "grad_norm": 2.7137186527252197,
85
+ "learning_rate": 4.73e-05,
86
+ "loss": 4.5044,
87
+ "step": 55
88
+ },
89
+ {
90
+ "epoch": 0.06,
91
+ "grad_norm": 9.891243934631348,
92
+ "learning_rate": 4.705e-05,
93
+ "loss": 4.4744,
94
+ "step": 60
95
+ },
96
+ {
97
+ "epoch": 0.065,
98
+ "grad_norm": 3.6516237258911133,
99
+ "learning_rate": 4.6800000000000006e-05,
100
+ "loss": 4.6576,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.07,
105
+ "grad_norm": 5.687813758850098,
106
+ "learning_rate": 4.655000000000001e-05,
107
+ "loss": 5.1204,
108
+ "step": 70
109
+ },
110
+ {
111
+ "epoch": 0.075,
112
+ "grad_norm": 4.273693561553955,
113
+ "learning_rate": 4.630000000000001e-05,
114
+ "loss": 5.361,
115
+ "step": 75
116
+ },
117
+ {
118
+ "epoch": 0.08,
119
+ "grad_norm": 6.802962779998779,
120
+ "learning_rate": 4.605e-05,
121
+ "loss": 5.5483,
122
+ "step": 80
123
+ },
124
+ {
125
+ "epoch": 0.085,
126
+ "grad_norm": 2.7016360759735107,
127
+ "learning_rate": 4.58e-05,
128
+ "loss": 4.5909,
129
+ "step": 85
130
+ },
131
+ {
132
+ "epoch": 0.09,
133
+ "grad_norm": 8.201666831970215,
134
+ "learning_rate": 4.555e-05,
135
+ "loss": 4.2807,
136
+ "step": 90
137
+ },
138
+ {
139
+ "epoch": 0.095,
140
+ "grad_norm": 3.8271970748901367,
141
+ "learning_rate": 4.53e-05,
142
+ "loss": 5.5645,
143
+ "step": 95
144
+ },
145
+ {
146
+ "epoch": 0.1,
147
+ "grad_norm": 3.17282772064209,
148
+ "learning_rate": 4.5050000000000004e-05,
149
+ "loss": 4.5845,
150
+ "step": 100
151
+ }
152
+ ],
153
+ "logging_steps": 5,
154
+ "max_steps": 1000,
155
+ "num_input_tokens_seen": 0,
156
+ "num_train_epochs": 1,
157
+ "save_steps": 25,
158
+ "stateful_callbacks": {
159
+ "TrainerControl": {
160
+ "args": {
161
+ "should_epoch_stop": false,
162
+ "should_evaluate": false,
163
+ "should_log": false,
164
+ "should_save": true,
165
+ "should_training_stop": false
166
+ },
167
+ "attributes": {}
168
+ }
169
+ },
170
+ "total_flos": 34004061388800.0,
171
+ "train_batch_size": 1,
172
+ "trial_name": null,
173
+ "trial_params": null
174
+ }
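`trainer_state.json` records one `log_history` entry every `logging_steps` (5) optimizer steps, each with the loss, gradient norm and current learning rate. A short sketch for reading the training curve back out of any checkpoint directory:

```python
import json

with open("just_lora_weights/checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# Each logged entry covers 5 steps; later checkpoints may also contain summary
# entries without a "loss" field, so guard for it.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>4}  loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")
```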
just_lora_weights/checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b32254dfaf0c82ecfc54cd31b94eb7c8ebb2e7d832a31fe43128bcff0fa132b3
3
+ size 5713
just_lora_weights/checkpoint-100/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
just_lora_weights/checkpoint-1000/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: Qwen/Qwen3-0.6B
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.15.2
just_lora_weights/checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen3-0.6B",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 16,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.05,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "r": 16,
24
+ "rank_pattern": {},
25
+ "revision": null,
26
+ "target_modules": [
27
+ "v_proj",
28
+ "q_proj"
29
+ ],
30
+ "task_type": "CAUSAL_LM",
31
+ "trainable_token_indices": null,
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
just_lora_weights/checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46152164316484799408f9ffa7cb81d8563e68b7cfac4cb104568a12732313b8
3
+ size 9189904
just_lora_weights/checkpoint-1000/added_tokens.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "</think>": 151668,
3
+ "</tool_call>": 151658,
4
+ "</tool_response>": 151666,
5
+ "<think>": 151667,
6
+ "<tool_call>": 151657,
7
+ "<tool_response>": 151665,
8
+ "<|box_end|>": 151649,
9
+ "<|box_start|>": 151648,
10
+ "<|endoftext|>": 151643,
11
+ "<|file_sep|>": 151664,
12
+ "<|fim_middle|>": 151660,
13
+ "<|fim_pad|>": 151662,
14
+ "<|fim_prefix|>": 151659,
15
+ "<|fim_suffix|>": 151661,
16
+ "<|im_end|>": 151645,
17
+ "<|im_start|>": 151644,
18
+ "<|image_pad|>": 151655,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
just_lora_weights/checkpoint-1000/chat_template.jinja ADDED
@@ -0,0 +1,89 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- endif %}
6
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
7
+ {%- for tool in tools %}
8
+ {{- "\n" }}
9
+ {{- tool | tojson }}
10
+ {%- endfor %}
11
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
12
+ {%- else %}
13
+ {%- if messages[0].role == 'system' %}
14
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
15
+ {%- endif %}
16
+ {%- endif %}
17
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
18
+ {%- for message in messages[::-1] %}
19
+ {%- set index = (messages|length - 1) - loop.index0 %}
20
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
21
+ {%- set ns.multi_step_tool = false %}
22
+ {%- set ns.last_query_index = index %}
23
+ {%- endif %}
24
+ {%- endfor %}
25
+ {%- for message in messages %}
26
+ {%- if message.content is string %}
27
+ {%- set content = message.content %}
28
+ {%- else %}
29
+ {%- set content = '' %}
30
+ {%- endif %}
31
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
32
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
33
+ {%- elif message.role == "assistant" %}
34
+ {%- set reasoning_content = '' %}
35
+ {%- if message.reasoning_content is string %}
36
+ {%- set reasoning_content = message.reasoning_content %}
37
+ {%- else %}
38
+ {%- if '</think>' in content %}
39
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
40
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
41
+ {%- endif %}
42
+ {%- endif %}
43
+ {%- if loop.index0 > ns.last_query_index %}
44
+ {%- if loop.last or (not loop.last and reasoning_content) %}
45
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
46
+ {%- else %}
47
+ {{- '<|im_start|>' + message.role + '\n' + content }}
48
+ {%- endif %}
49
+ {%- else %}
50
+ {{- '<|im_start|>' + message.role + '\n' + content }}
51
+ {%- endif %}
52
+ {%- if message.tool_calls %}
53
+ {%- for tool_call in message.tool_calls %}
54
+ {%- if (loop.first and content) or (not loop.first) %}
55
+ {{- '\n' }}
56
+ {%- endif %}
57
+ {%- if tool_call.function %}
58
+ {%- set tool_call = tool_call.function %}
59
+ {%- endif %}
60
+ {{- '<tool_call>\n{"name": "' }}
61
+ {{- tool_call.name }}
62
+ {{- '", "arguments": ' }}
63
+ {%- if tool_call.arguments is string %}
64
+ {{- tool_call.arguments }}
65
+ {%- else %}
66
+ {{- tool_call.arguments | tojson }}
67
+ {%- endif %}
68
+ {{- '}\n</tool_call>' }}
69
+ {%- endfor %}
70
+ {%- endif %}
71
+ {{- '<|im_end|>\n' }}
72
+ {%- elif message.role == "tool" %}
73
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
74
+ {{- '<|im_start|>user' }}
75
+ {%- endif %}
76
+ {{- '\n<tool_response>\n' }}
77
+ {{- content }}
78
+ {{- '\n</tool_response>' }}
79
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
80
+ {{- '<|im_end|>\n' }}
81
+ {%- endif %}
82
+ {%- endif %}
83
+ {%- endfor %}
84
+ {%- if add_generation_prompt %}
85
+ {{- '<|im_start|>assistant\n' }}
86
+ {%- if enable_thinking is defined and enable_thinking is false %}
87
+ {{- '<think>\n\n</think>\n\n' }}
88
+ {%- endif %}
89
+ {%- endif %}
just_lora_weights/checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
just_lora_weights/checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0cbdcdb1ca6ed7d700b4835a35f839fa6d85a737608522ce984feefd2204a950
3
+ size 18441675
just_lora_weights/checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cddf27219365242ec1046a3532a63a24c3f350c77f100e4f973369db2cc849d
3
+ size 14455
just_lora_weights/checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e78a303f3dc54e638a18cc4374f3e288409f35549509f776b4cdbbc1b527159
3
+ size 1465
just_lora_weights/checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
just_lora_weights/checkpoint-1000/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
3
+ size 11422654
just_lora_weights/checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 131072,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
just_lora_weights/checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,1434 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 1.0,
6
+ "eval_steps": 500,
7
+ "global_step": 1000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.005,
14
+ "grad_norm": 4.4095964431762695,
15
+ "learning_rate": 4.9800000000000004e-05,
16
+ "loss": 5.4329,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.01,
21
+ "grad_norm": 2.658348798751831,
22
+ "learning_rate": 4.9550000000000005e-05,
23
+ "loss": 4.6734,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.015,
28
+ "grad_norm": 3.4242470264434814,
29
+ "learning_rate": 4.93e-05,
30
+ "loss": 4.8327,
31
+ "step": 15
32
+ },
33
+ {
34
+ "epoch": 0.02,
35
+ "grad_norm": 4.363213539123535,
36
+ "learning_rate": 4.905e-05,
37
+ "loss": 4.4238,
38
+ "step": 20
39
+ },
40
+ {
41
+ "epoch": 0.025,
42
+ "grad_norm": 2.531619071960449,
43
+ "learning_rate": 4.88e-05,
44
+ "loss": 3.9284,
45
+ "step": 25
46
+ },
47
+ {
48
+ "epoch": 0.03,
49
+ "grad_norm": 4.686695098876953,
50
+ "learning_rate": 4.855e-05,
51
+ "loss": 4.6794,
52
+ "step": 30
53
+ },
54
+ {
55
+ "epoch": 0.035,
56
+ "grad_norm": 4.860099792480469,
57
+ "learning_rate": 4.83e-05,
58
+ "loss": 5.0624,
59
+ "step": 35
60
+ },
61
+ {
62
+ "epoch": 0.04,
63
+ "grad_norm": 8.296855926513672,
64
+ "learning_rate": 4.805e-05,
65
+ "loss": 4.8011,
66
+ "step": 40
67
+ },
68
+ {
69
+ "epoch": 0.045,
70
+ "grad_norm": 4.80878210067749,
71
+ "learning_rate": 4.78e-05,
72
+ "loss": 5.495,
73
+ "step": 45
74
+ },
75
+ {
76
+ "epoch": 0.05,
77
+ "grad_norm": 4.766687393188477,
78
+ "learning_rate": 4.755e-05,
79
+ "loss": 4.6778,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 0.055,
84
+ "grad_norm": 2.7137186527252197,
85
+ "learning_rate": 4.73e-05,
86
+ "loss": 4.5044,
87
+ "step": 55
88
+ },
89
+ {
90
+ "epoch": 0.06,
91
+ "grad_norm": 9.891243934631348,
92
+ "learning_rate": 4.705e-05,
93
+ "loss": 4.4744,
94
+ "step": 60
95
+ },
96
+ {
97
+ "epoch": 0.065,
98
+ "grad_norm": 3.6516237258911133,
99
+ "learning_rate": 4.6800000000000006e-05,
100
+ "loss": 4.6576,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.07,
105
+ "grad_norm": 5.687813758850098,
106
+ "learning_rate": 4.655000000000001e-05,
107
+ "loss": 5.1204,
108
+ "step": 70
109
+ },
110
+ {
111
+ "epoch": 0.075,
112
+ "grad_norm": 4.273693561553955,
113
+ "learning_rate": 4.630000000000001e-05,
114
+ "loss": 5.361,
115
+ "step": 75
116
+ },
117
+ {
118
+ "epoch": 0.08,
119
+ "grad_norm": 6.802962779998779,
120
+ "learning_rate": 4.605e-05,
121
+ "loss": 5.5483,
122
+ "step": 80
123
+ },
124
+ {
125
+ "epoch": 0.085,
126
+ "grad_norm": 2.7016360759735107,
127
+ "learning_rate": 4.58e-05,
128
+ "loss": 4.5909,
129
+ "step": 85
130
+ },
131
+ {
132
+ "epoch": 0.09,
133
+ "grad_norm": 8.201666831970215,
134
+ "learning_rate": 4.555e-05,
135
+ "loss": 4.2807,
136
+ "step": 90
137
+ },
138
+ {
139
+ "epoch": 0.095,
140
+ "grad_norm": 3.8271970748901367,
141
+ "learning_rate": 4.53e-05,
142
+ "loss": 5.5645,
143
+ "step": 95
144
+ },
145
+ {
146
+ "epoch": 0.1,
147
+ "grad_norm": 3.17282772064209,
148
+ "learning_rate": 4.5050000000000004e-05,
149
+ "loss": 4.5845,
150
+ "step": 100
151
+ },
152
+ {
153
+ "epoch": 0.105,
154
+ "grad_norm": 2.9256932735443115,
155
+ "learning_rate": 4.4800000000000005e-05,
156
+ "loss": 4.6003,
157
+ "step": 105
158
+ },
159
+ {
160
+ "epoch": 0.11,
161
+ "grad_norm": 10.4873046875,
162
+ "learning_rate": 4.4550000000000005e-05,
163
+ "loss": 4.6453,
164
+ "step": 110
165
+ },
166
+ {
167
+ "epoch": 0.115,
168
+ "grad_norm": 2.9158883094787598,
169
+ "learning_rate": 4.43e-05,
170
+ "loss": 4.8935,
171
+ "step": 115
172
+ },
173
+ {
174
+ "epoch": 0.12,
175
+ "grad_norm": 3.5588793754577637,
176
+ "learning_rate": 4.405e-05,
177
+ "loss": 3.9582,
178
+ "step": 120
179
+ },
180
+ {
181
+ "epoch": 0.125,
182
+ "grad_norm": 2.7040491104125977,
183
+ "learning_rate": 4.38e-05,
184
+ "loss": 4.6688,
185
+ "step": 125
186
+ },
187
+ {
188
+ "epoch": 0.13,
189
+ "grad_norm": 3.1420748233795166,
190
+ "learning_rate": 4.355e-05,
191
+ "loss": 4.5591,
192
+ "step": 130
193
+ },
194
+ {
195
+ "epoch": 0.135,
196
+ "grad_norm": 2.023017168045044,
197
+ "learning_rate": 4.33e-05,
198
+ "loss": 4.5944,
199
+ "step": 135
200
+ },
201
+ {
202
+ "epoch": 0.14,
203
+ "grad_norm": 2.902434825897217,
204
+ "learning_rate": 4.305e-05,
205
+ "loss": 5.4079,
206
+ "step": 140
207
+ },
208
+ {
209
+ "epoch": 0.145,
210
+ "grad_norm": 2.4470090866088867,
211
+ "learning_rate": 4.2800000000000004e-05,
212
+ "loss": 4.9259,
213
+ "step": 145
214
+ },
215
+ {
216
+ "epoch": 0.15,
217
+ "grad_norm": 4.8970046043396,
218
+ "learning_rate": 4.2550000000000004e-05,
219
+ "loss": 4.6465,
220
+ "step": 150
221
+ },
222
+ {
223
+ "epoch": 0.155,
224
+ "grad_norm": 2.197767734527588,
225
+ "learning_rate": 4.23e-05,
226
+ "loss": 4.6841,
227
+ "step": 155
228
+ },
229
+ {
230
+ "epoch": 0.16,
231
+ "grad_norm": 4.577955722808838,
232
+ "learning_rate": 4.205e-05,
233
+ "loss": 4.1775,
234
+ "step": 160
235
+ },
236
+ {
237
+ "epoch": 0.165,
238
+ "grad_norm": 2.3383991718292236,
239
+ "learning_rate": 4.18e-05,
240
+ "loss": 4.0309,
241
+ "step": 165
242
+ },
243
+ {
244
+ "epoch": 0.17,
245
+ "grad_norm": 3.794187307357788,
246
+ "learning_rate": 4.155e-05,
247
+ "loss": 4.5336,
248
+ "step": 170
249
+ },
250
+ {
251
+ "epoch": 0.175,
252
+ "grad_norm": 4.427373886108398,
253
+ "learning_rate": 4.13e-05,
254
+ "loss": 4.241,
255
+ "step": 175
256
+ },
257
+ {
258
+ "epoch": 0.18,
259
+ "grad_norm": 2.8116564750671387,
260
+ "learning_rate": 4.105e-05,
261
+ "loss": 4.7562,
262
+ "step": 180
263
+ },
264
+ {
265
+ "epoch": 0.185,
266
+ "grad_norm": 5.841445446014404,
267
+ "learning_rate": 4.08e-05,
268
+ "loss": 3.7034,
269
+ "step": 185
270
+ },
271
+ {
272
+ "epoch": 0.19,
273
+ "grad_norm": 20.96999740600586,
274
+ "learning_rate": 4.055e-05,
275
+ "loss": 5.1727,
276
+ "step": 190
277
+ },
278
+ {
279
+ "epoch": 0.195,
280
+ "grad_norm": 11.743700981140137,
281
+ "learning_rate": 4.0300000000000004e-05,
282
+ "loss": 5.1553,
283
+ "step": 195
284
+ },
285
+ {
286
+ "epoch": 0.2,
287
+ "grad_norm": 10.230498313903809,
288
+ "learning_rate": 4.0050000000000004e-05,
289
+ "loss": 4.8657,
290
+ "step": 200
291
+ },
292
+ {
293
+ "epoch": 0.205,
294
+ "grad_norm": 5.706521987915039,
295
+ "learning_rate": 3.9800000000000005e-05,
296
+ "loss": 4.3638,
297
+ "step": 205
298
+ },
299
+ {
300
+ "epoch": 0.21,
301
+ "grad_norm": 5.781250476837158,
302
+ "learning_rate": 3.9550000000000006e-05,
303
+ "loss": 4.3452,
304
+ "step": 210
305
+ },
306
+ {
307
+ "epoch": 0.215,
308
+ "grad_norm": 2.371096611022949,
309
+ "learning_rate": 3.9300000000000007e-05,
310
+ "loss": 4.6681,
311
+ "step": 215
312
+ },
313
+ {
314
+ "epoch": 0.22,
315
+ "grad_norm": 4.352181911468506,
316
+ "learning_rate": 3.905e-05,
317
+ "loss": 4.2025,
318
+ "step": 220
319
+ },
320
+ {
321
+ "epoch": 0.225,
322
+ "grad_norm": 4.531359672546387,
323
+ "learning_rate": 3.88e-05,
324
+ "loss": 4.5391,
325
+ "step": 225
326
+ },
327
+ {
328
+ "epoch": 0.23,
329
+ "grad_norm": 6.948793411254883,
330
+ "learning_rate": 3.855e-05,
331
+ "loss": 4.0233,
332
+ "step": 230
333
+ },
334
+ {
335
+ "epoch": 0.235,
336
+ "grad_norm": 4.765657424926758,
337
+ "learning_rate": 3.83e-05,
338
+ "loss": 4.1225,
339
+ "step": 235
340
+ },
341
+ {
342
+ "epoch": 0.24,
343
+ "grad_norm": 3.838766574859619,
344
+ "learning_rate": 3.805e-05,
345
+ "loss": 4.9202,
346
+ "step": 240
347
+ },
348
+ {
349
+ "epoch": 0.245,
350
+ "grad_norm": 6.471155643463135,
351
+ "learning_rate": 3.7800000000000004e-05,
352
+ "loss": 4.7561,
353
+ "step": 245
354
+ },
355
+ {
356
+ "epoch": 0.25,
357
+ "grad_norm": 5.748368740081787,
358
+ "learning_rate": 3.7550000000000005e-05,
359
+ "loss": 3.7783,
360
+ "step": 250
361
+ },
362
+ {
363
+ "epoch": 0.255,
364
+ "grad_norm": 2.663558006286621,
365
+ "learning_rate": 3.73e-05,
366
+ "loss": 4.1553,
367
+ "step": 255
368
+ },
369
+ {
370
+ "epoch": 0.26,
371
+ "grad_norm": 3.4103472232818604,
372
+ "learning_rate": 3.705e-05,
373
+ "loss": 4.9314,
374
+ "step": 260
375
+ },
376
+ {
377
+ "epoch": 0.265,
378
+ "grad_norm": 6.4364824295043945,
379
+ "learning_rate": 3.68e-05,
380
+ "loss": 4.4708,
381
+ "step": 265
382
+ },
383
+ {
384
+ "epoch": 0.27,
385
+ "grad_norm": 4.145668983459473,
386
+ "learning_rate": 3.655e-05,
387
+ "loss": 4.5879,
388
+ "step": 270
389
+ },
390
+ {
391
+ "epoch": 0.275,
392
+ "grad_norm": 3.4401297569274902,
393
+ "learning_rate": 3.63e-05,
394
+ "loss": 4.7668,
395
+ "step": 275
396
+ },
397
+ {
398
+ "epoch": 0.28,
399
+ "grad_norm": 9.18470287322998,
400
+ "learning_rate": 3.605e-05,
401
+ "loss": 4.619,
402
+ "step": 280
403
+ },
404
+ {
405
+ "epoch": 0.285,
406
+ "grad_norm": 4.180783748626709,
407
+ "learning_rate": 3.58e-05,
408
+ "loss": 3.8878,
409
+ "step": 285
410
+ },
411
+ {
412
+ "epoch": 0.29,
413
+ "grad_norm": 7.336719036102295,
414
+ "learning_rate": 3.555e-05,
415
+ "loss": 4.5864,
416
+ "step": 290
417
+ },
418
+ {
419
+ "epoch": 0.295,
420
+ "grad_norm": 4.498530864715576,
421
+ "learning_rate": 3.53e-05,
422
+ "loss": 3.8987,
423
+ "step": 295
424
+ },
425
+ {
426
+ "epoch": 0.3,
427
+ "grad_norm": 3.3154962062835693,
428
+ "learning_rate": 3.505e-05,
429
+ "loss": 4.5049,
430
+ "step": 300
431
+ },
432
+ {
433
+ "epoch": 0.305,
434
+ "grad_norm": 5.832047462463379,
435
+ "learning_rate": 3.48e-05,
436
+ "loss": 5.0849,
437
+ "step": 305
438
+ },
439
+ {
440
+ "epoch": 0.31,
441
+ "grad_norm": 17.935897827148438,
442
+ "learning_rate": 3.455e-05,
443
+ "loss": 4.8395,
444
+ "step": 310
445
+ },
446
+ {
447
+ "epoch": 0.315,
448
+ "grad_norm": 8.821608543395996,
449
+ "learning_rate": 3.430000000000001e-05,
450
+ "loss": 4.9111,
451
+ "step": 315
452
+ },
453
+ {
454
+ "epoch": 0.32,
455
+ "grad_norm": 4.014020919799805,
456
+ "learning_rate": 3.405e-05,
457
+ "loss": 3.6753,
458
+ "step": 320
459
+ },
460
+ {
461
+ "epoch": 0.325,
462
+ "grad_norm": 7.319735050201416,
463
+ "learning_rate": 3.38e-05,
464
+ "loss": 4.6448,
465
+ "step": 325
466
+ },
467
+ {
468
+ "epoch": 0.33,
469
+ "grad_norm": 3.252227783203125,
470
+ "learning_rate": 3.355e-05,
471
+ "loss": 4.1821,
472
+ "step": 330
473
+ },
474
+ {
475
+ "epoch": 0.335,
476
+ "grad_norm": 5.4611616134643555,
477
+ "learning_rate": 3.33e-05,
478
+ "loss": 4.6576,
479
+ "step": 335
480
+ },
481
+ {
482
+ "epoch": 0.34,
483
+ "grad_norm": 5.558750152587891,
484
+ "learning_rate": 3.3050000000000004e-05,
485
+ "loss": 3.8608,
486
+ "step": 340
487
+ },
488
+ {
489
+ "epoch": 0.345,
490
+ "grad_norm": 7.037349700927734,
491
+ "learning_rate": 3.2800000000000004e-05,
492
+ "loss": 4.5832,
493
+ "step": 345
494
+ },
495
+ {
496
+ "epoch": 0.35,
497
+ "grad_norm": 8.361868858337402,
498
+ "learning_rate": 3.2550000000000005e-05,
499
+ "loss": 4.7027,
500
+ "step": 350
501
+ },
502
+ {
503
+ "epoch": 0.355,
504
+ "grad_norm": 3.756870746612549,
505
+ "learning_rate": 3.2300000000000006e-05,
506
+ "loss": 3.964,
507
+ "step": 355
508
+ },
509
+ {
510
+ "epoch": 0.36,
511
+ "grad_norm": 6.4203667640686035,
512
+ "learning_rate": 3.205e-05,
513
+ "loss": 4.6799,
514
+ "step": 360
515
+ },
516
+ {
517
+ "epoch": 0.365,
518
+ "grad_norm": 6.658571720123291,
519
+ "learning_rate": 3.18e-05,
520
+ "loss": 4.9203,
521
+ "step": 365
522
+ },
523
+ {
524
+ "epoch": 0.37,
525
+ "grad_norm": 3.5544397830963135,
526
+ "learning_rate": 3.155e-05,
527
+ "loss": 3.6623,
528
+ "step": 370
529
+ },
530
+ {
531
+ "epoch": 0.375,
532
+ "grad_norm": 6.467970848083496,
533
+ "learning_rate": 3.13e-05,
534
+ "loss": 4.5927,
535
+ "step": 375
536
+ },
537
+ {
538
+ "epoch": 0.38,
539
+ "grad_norm": 5.445802211761475,
540
+ "learning_rate": 3.105e-05,
541
+ "loss": 4.2229,
542
+ "step": 380
543
+ },
544
+ {
545
+ "epoch": 0.385,
546
+ "grad_norm": 4.658431529998779,
547
+ "learning_rate": 3.08e-05,
548
+ "loss": 3.9,
549
+ "step": 385
550
+ },
551
+ {
552
+ "epoch": 0.39,
553
+ "grad_norm": 1.9451472759246826,
554
+ "learning_rate": 3.0550000000000004e-05,
555
+ "loss": 4.5191,
556
+ "step": 390
557
+ },
558
+ {
559
+ "epoch": 0.395,
560
+ "grad_norm": 8.094263076782227,
561
+ "learning_rate": 3.03e-05,
562
+ "loss": 4.6497,
563
+ "step": 395
564
+ },
565
+ {
566
+ "epoch": 0.4,
567
+ "grad_norm": 4.177872657775879,
568
+ "learning_rate": 3.0050000000000002e-05,
569
+ "loss": 4.2222,
570
+ "step": 400
571
+ },
572
+ {
573
+ "epoch": 0.405,
574
+ "grad_norm": 5.344555377960205,
575
+ "learning_rate": 2.98e-05,
576
+ "loss": 4.0168,
577
+ "step": 405
578
+ },
579
+ {
580
+ "epoch": 0.41,
581
+ "grad_norm": 8.562535285949707,
582
+ "learning_rate": 2.955e-05,
583
+ "loss": 4.2704,
584
+ "step": 410
585
+ },
586
+ {
587
+ "epoch": 0.415,
588
+ "grad_norm": 4.839510917663574,
589
+ "learning_rate": 2.93e-05,
590
+ "loss": 3.6407,
591
+ "step": 415
592
+ },
593
+ {
594
+ "epoch": 0.42,
595
+ "grad_norm": 6.950375080108643,
596
+ "learning_rate": 2.9049999999999998e-05,
597
+ "loss": 4.0565,
598
+ "step": 420
599
+ },
600
+ {
601
+ "epoch": 0.425,
602
+ "grad_norm": 6.146759510040283,
603
+ "learning_rate": 2.88e-05,
604
+ "loss": 4.1459,
605
+ "step": 425
606
+ },
607
+ {
608
+ "epoch": 0.43,
609
+ "grad_norm": 4.605158805847168,
610
+ "learning_rate": 2.855e-05,
611
+ "loss": 4.5043,
612
+ "step": 430
613
+ },
614
+ {
615
+ "epoch": 0.435,
616
+ "grad_norm": 8.02145004272461,
617
+ "learning_rate": 2.83e-05,
618
+ "loss": 3.9532,
619
+ "step": 435
620
+ },
621
+ {
622
+ "epoch": 0.44,
623
+ "grad_norm": 15.021471977233887,
624
+ "learning_rate": 2.8050000000000004e-05,
625
+ "loss": 4.4141,
626
+ "step": 440
627
+ },
628
+ {
629
+ "epoch": 0.445,
630
+ "grad_norm": 8.20073127746582,
631
+ "learning_rate": 2.7800000000000005e-05,
632
+ "loss": 4.345,
633
+ "step": 445
634
+ },
635
+ {
636
+ "epoch": 0.45,
637
+ "grad_norm": 4.261655807495117,
638
+ "learning_rate": 2.7550000000000002e-05,
639
+ "loss": 4.2696,
640
+ "step": 450
641
+ },
642
+ {
643
+ "epoch": 0.455,
644
+ "grad_norm": 6.736018180847168,
645
+ "learning_rate": 2.7300000000000003e-05,
646
+ "loss": 4.2742,
647
+ "step": 455
648
+ },
649
+ {
650
+ "epoch": 0.46,
651
+ "grad_norm": 4.796390533447266,
652
+ "learning_rate": 2.7050000000000004e-05,
653
+ "loss": 4.1937,
654
+ "step": 460
655
+ },
656
+ {
657
+ "epoch": 0.465,
658
+ "grad_norm": 12.736322402954102,
659
+ "learning_rate": 2.6800000000000004e-05,
660
+ "loss": 4.4847,
661
+ "step": 465
662
+ },
663
+ {
664
+ "epoch": 0.47,
665
+ "grad_norm": 7.087122917175293,
666
+ "learning_rate": 2.655e-05,
667
+ "loss": 4.217,
668
+ "step": 470
669
+ },
670
+ {
671
+ "epoch": 0.475,
672
+ "grad_norm": 6.4038190841674805,
673
+ "learning_rate": 2.6300000000000002e-05,
674
+ "loss": 4.6776,
675
+ "step": 475
676
+ },
677
+ {
678
+ "epoch": 0.48,
679
+ "grad_norm": 5.816014289855957,
680
+ "learning_rate": 2.6050000000000003e-05,
681
+ "loss": 4.0214,
682
+ "step": 480
683
+ },
684
+ {
685
+ "epoch": 0.485,
686
+ "grad_norm": 12.878230094909668,
687
+ "learning_rate": 2.58e-05,
688
+ "loss": 4.407,
689
+ "step": 485
690
+ },
691
+ {
692
+ "epoch": 0.49,
693
+ "grad_norm": 8.707136154174805,
694
+ "learning_rate": 2.555e-05,
695
+ "loss": 4.3686,
696
+ "step": 490
697
+ },
698
+ {
699
+ "epoch": 0.495,
700
+ "grad_norm": 5.850894927978516,
701
+ "learning_rate": 2.5300000000000002e-05,
702
+ "loss": 4.034,
703
+ "step": 495
704
+ },
705
+ {
706
+ "epoch": 0.5,
707
+ "grad_norm": 4.997234344482422,
708
+ "learning_rate": 2.5050000000000002e-05,
709
+ "loss": 3.8851,
710
+ "step": 500
711
+ },
712
+ {
713
+ "epoch": 0.505,
714
+ "grad_norm": 8.565802574157715,
715
+ "learning_rate": 2.48e-05,
716
+ "loss": 4.6681,
717
+ "step": 505
718
+ },
719
+ {
720
+ "epoch": 0.51,
721
+ "grad_norm": 10.438283920288086,
722
+ "learning_rate": 2.455e-05,
723
+ "loss": 4.4391,
724
+ "step": 510
725
+ },
726
+ {
727
+ "epoch": 0.515,
728
+ "grad_norm": 12.387027740478516,
729
+ "learning_rate": 2.43e-05,
730
+ "loss": 4.6539,
731
+ "step": 515
732
+ },
733
+ {
734
+ "epoch": 0.52,
735
+ "grad_norm": 8.13821029663086,
736
+ "learning_rate": 2.4050000000000002e-05,
737
+ "loss": 5.1541,
738
+ "step": 520
739
+ },
740
+ {
741
+ "epoch": 0.525,
742
+ "grad_norm": 6.827164649963379,
743
+ "learning_rate": 2.38e-05,
744
+ "loss": 4.356,
745
+ "step": 525
746
+ },
747
+ {
748
+ "epoch": 0.53,
749
+ "grad_norm": 6.84028959274292,
750
+ "learning_rate": 2.355e-05,
751
+ "loss": 4.1476,
752
+ "step": 530
753
+ },
754
+ {
755
+ "epoch": 0.535,
756
+ "grad_norm": 10.003660202026367,
757
+ "learning_rate": 2.3300000000000004e-05,
758
+ "loss": 4.2211,
759
+ "step": 535
760
+ },
761
+ {
762
+ "epoch": 0.54,
763
+ "grad_norm": 8.519783973693848,
764
+ "learning_rate": 2.305e-05,
765
+ "loss": 3.9058,
766
+ "step": 540
767
+ },
768
+ {
769
+ "epoch": 0.545,
770
+ "grad_norm": 9.998250961303711,
771
+ "learning_rate": 2.2800000000000002e-05,
772
+ "loss": 3.9606,
773
+ "step": 545
774
+ },
775
+ {
776
+ "epoch": 0.55,
777
+ "grad_norm": 3.5864787101745605,
778
+ "learning_rate": 2.2550000000000003e-05,
779
+ "loss": 4.2792,
780
+ "step": 550
781
+ },
782
+ {
783
+ "epoch": 0.555,
784
+ "grad_norm": 2.15779709815979,
785
+ "learning_rate": 2.23e-05,
786
+ "loss": 4.2588,
787
+ "step": 555
788
+ },
789
+ {
790
+ "epoch": 0.56,
791
+ "grad_norm": 7.362779140472412,
792
+ "learning_rate": 2.205e-05,
793
+ "loss": 4.3834,
794
+ "step": 560
795
+ },
796
+ {
797
+ "epoch": 0.565,
798
+ "grad_norm": 3.7950263023376465,
799
+ "learning_rate": 2.18e-05,
800
+ "loss": 5.0136,
801
+ "step": 565
802
+ },
803
+ {
804
+ "epoch": 0.57,
805
+ "grad_norm": 6.706342697143555,
806
+ "learning_rate": 2.1550000000000002e-05,
807
+ "loss": 3.8674,
808
+ "step": 570
809
+ },
810
+ {
811
+ "epoch": 0.575,
812
+ "grad_norm": 4.262815475463867,
813
+ "learning_rate": 2.13e-05,
814
+ "loss": 3.2834,
815
+ "step": 575
816
+ },
817
+ {
818
+ "epoch": 0.58,
819
+ "grad_norm": 5.646935939788818,
820
+ "learning_rate": 2.105e-05,
821
+ "loss": 3.7601,
822
+ "step": 580
823
+ },
824
+ {
825
+ "epoch": 0.585,
826
+ "grad_norm": 4.463131427764893,
827
+ "learning_rate": 2.08e-05,
828
+ "loss": 3.9355,
829
+ "step": 585
830
+ },
831
+ {
832
+ "epoch": 0.59,
833
+ "grad_norm": 7.170533657073975,
834
+ "learning_rate": 2.055e-05,
835
+ "loss": 4.1956,
836
+ "step": 590
837
+ },
838
+ {
839
+ "epoch": 0.595,
840
+ "grad_norm": 4.189892768859863,
841
+ "learning_rate": 2.0300000000000002e-05,
842
+ "loss": 4.385,
843
+ "step": 595
844
+ },
845
+ {
846
+ "epoch": 0.6,
847
+ "grad_norm": 9.586647987365723,
848
+ "learning_rate": 2.0050000000000003e-05,
849
+ "loss": 4.3872,
850
+ "step": 600
851
+ },
852
+ {
853
+ "epoch": 0.605,
854
+ "grad_norm": 2.3141019344329834,
855
+ "learning_rate": 1.9800000000000004e-05,
856
+ "loss": 4.1622,
857
+ "step": 605
858
+ },
859
+ {
860
+ "epoch": 0.61,
861
+ "grad_norm": 11.550451278686523,
862
+ "learning_rate": 1.955e-05,
863
+ "loss": 4.8773,
864
+ "step": 610
865
+ },
866
+ {
867
+ "epoch": 0.615,
868
+ "grad_norm": 9.522382736206055,
869
+ "learning_rate": 1.93e-05,
870
+ "loss": 4.6614,
871
+ "step": 615
872
+ },
873
+ {
874
+ "epoch": 0.62,
875
+ "grad_norm": 10.683562278747559,
876
+ "learning_rate": 1.9050000000000002e-05,
877
+ "loss": 4.0905,
878
+ "step": 620
879
+ },
880
+ {
881
+ "epoch": 0.625,
882
+ "grad_norm": 2.9108145236968994,
883
+ "learning_rate": 1.88e-05,
884
+ "loss": 4.049,
885
+ "step": 625
886
+ },
887
+ {
888
+ "epoch": 0.63,
889
+ "grad_norm": 5.9698944091796875,
890
+ "learning_rate": 1.855e-05,
891
+ "loss": 3.7179,
892
+ "step": 630
893
+ },
894
+ {
895
+ "epoch": 0.635,
896
+ "grad_norm": 5.07297945022583,
897
+ "learning_rate": 1.83e-05,
898
+ "loss": 3.8728,
899
+ "step": 635
900
+ },
901
+ {
902
+ "epoch": 0.64,
903
+ "grad_norm": 7.098571300506592,
904
+ "learning_rate": 1.805e-05,
905
+ "loss": 3.7689,
906
+ "step": 640
907
+ },
908
+ {
909
+ "epoch": 0.645,
910
+ "grad_norm": 8.76673698425293,
911
+ "learning_rate": 1.78e-05,
912
+ "loss": 4.4143,
913
+ "step": 645
914
+ },
915
+ {
916
+ "epoch": 0.65,
917
+ "grad_norm": 2.236011266708374,
918
+ "learning_rate": 1.755e-05,
919
+ "loss": 3.4461,
920
+ "step": 650
921
+ },
922
+ {
923
+ "epoch": 0.655,
924
+ "grad_norm": 6.17922830581665,
925
+ "learning_rate": 1.73e-05,
926
+ "loss": 4.3143,
927
+ "step": 655
928
+ },
929
+ {
930
+ "epoch": 0.66,
931
+ "grad_norm": 5.667351722717285,
932
+ "learning_rate": 1.705e-05,
933
+ "loss": 4.19,
934
+ "step": 660
935
+ },
936
+ {
937
+ "epoch": 0.665,
938
+ "grad_norm": 13.386966705322266,
939
+ "learning_rate": 1.6800000000000002e-05,
940
+ "loss": 4.5092,
941
+ "step": 665
942
+ },
943
+ {
944
+ "epoch": 0.67,
945
+ "grad_norm": 7.265449523925781,
946
+ "learning_rate": 1.6550000000000002e-05,
947
+ "loss": 3.9085,
948
+ "step": 670
949
+ },
950
+ {
951
+ "epoch": 0.675,
952
+ "grad_norm": 9.232586860656738,
953
+ "learning_rate": 1.63e-05,
954
+ "loss": 5.0393,
955
+ "step": 675
956
+ },
957
+ {
958
+ "epoch": 0.68,
959
+ "grad_norm": 8.057585716247559,
960
+ "learning_rate": 1.605e-05,
961
+ "loss": 3.9848,
962
+ "step": 680
963
+ },
964
+ {
965
+ "epoch": 0.685,
966
+ "grad_norm": 2.304192543029785,
967
+ "learning_rate": 1.58e-05,
968
+ "loss": 4.3656,
969
+ "step": 685
970
+ },
971
+ {
972
+ "epoch": 0.69,
973
+ "grad_norm": 10.112837791442871,
974
+ "learning_rate": 1.5550000000000002e-05,
975
+ "loss": 4.4643,
976
+ "step": 690
977
+ },
978
+ {
979
+ "epoch": 0.695,
980
+ "grad_norm": 6.0983757972717285,
981
+ "learning_rate": 1.53e-05,
982
+ "loss": 4.1222,
983
+ "step": 695
984
+ },
985
+ {
986
+ "epoch": 0.7,
987
+ "grad_norm": 5.149627685546875,
988
+ "learning_rate": 1.505e-05,
989
+ "loss": 4.0705,
990
+ "step": 700
991
+ },
992
+ {
993
+ "epoch": 0.705,
994
+ "grad_norm": 8.752741813659668,
995
+ "learning_rate": 1.48e-05,
996
+ "loss": 4.4209,
997
+ "step": 705
998
+ },
999
+ {
1000
+ "epoch": 0.71,
1001
+ "grad_norm": 4.792961120605469,
1002
+ "learning_rate": 1.455e-05,
1003
+ "loss": 4.1384,
1004
+ "step": 710
1005
+ },
1006
+ {
1007
+ "epoch": 0.715,
1008
+ "grad_norm": 7.746492385864258,
1009
+ "learning_rate": 1.43e-05,
1010
+ "loss": 4.1497,
1011
+ "step": 715
1012
+ },
1013
+ {
1014
+ "epoch": 0.72,
1015
+ "grad_norm": 6.519612789154053,
1016
+ "learning_rate": 1.4050000000000003e-05,
1017
+ "loss": 4.3319,
1018
+ "step": 720
1019
+ },
1020
+ {
1021
+ "epoch": 0.725,
1022
+ "grad_norm": 8.192032814025879,
1023
+ "learning_rate": 1.3800000000000002e-05,
1024
+ "loss": 3.6076,
1025
+ "step": 725
1026
+ },
1027
+ {
1028
+ "epoch": 0.73,
1029
+ "grad_norm": 9.460343360900879,
1030
+ "learning_rate": 1.3550000000000002e-05,
1031
+ "loss": 3.983,
1032
+ "step": 730
1033
+ },
1034
+ {
1035
+ "epoch": 0.735,
1036
+ "grad_norm": 4.7448225021362305,
1037
+ "learning_rate": 1.3300000000000001e-05,
1038
+ "loss": 4.4842,
1039
+ "step": 735
1040
+ },
1041
+ {
1042
+ "epoch": 0.74,
1043
+ "grad_norm": 2.821718454360962,
1044
+ "learning_rate": 1.305e-05,
1045
+ "loss": 4.7517,
1046
+ "step": 740
1047
+ },
1048
+ {
1049
+ "epoch": 0.745,
1050
+ "grad_norm": 7.751161575317383,
1051
+ "learning_rate": 1.2800000000000001e-05,
1052
+ "loss": 3.5541,
1053
+ "step": 745
1054
+ },
1055
+ {
1056
+ "epoch": 0.75,
1057
+ "grad_norm": 7.190122127532959,
1058
+ "learning_rate": 1.255e-05,
1059
+ "loss": 4.5604,
1060
+ "step": 750
1061
+ },
1062
+ {
1063
+ "epoch": 0.755,
1064
+ "grad_norm": 6.862963676452637,
1065
+ "learning_rate": 1.23e-05,
1066
+ "loss": 4.2742,
1067
+ "step": 755
1068
+ },
1069
+ {
1070
+ "epoch": 0.76,
1071
+ "grad_norm": 7.691545486450195,
1072
+ "learning_rate": 1.205e-05,
1073
+ "loss": 3.5938,
1074
+ "step": 760
1075
+ },
1076
+ {
1077
+ "epoch": 0.765,
1078
+ "grad_norm": 11.704629898071289,
1079
+ "learning_rate": 1.18e-05,
1080
+ "loss": 4.4012,
1081
+ "step": 765
1082
+ },
1083
+ {
1084
+ "epoch": 0.77,
1085
+ "grad_norm": 9.413323402404785,
1086
+ "learning_rate": 1.1550000000000001e-05,
1087
+ "loss": 3.9794,
1088
+ "step": 770
1089
+ },
1090
+ {
1091
+ "epoch": 0.775,
1092
+ "grad_norm": 22.67475128173828,
1093
+ "learning_rate": 1.13e-05,
1094
+ "loss": 4.078,
1095
+ "step": 775
1096
+ },
1097
+ {
1098
+ "epoch": 0.78,
1099
+ "grad_norm": 2.4493584632873535,
1100
+ "learning_rate": 1.1050000000000001e-05,
1101
+ "loss": 3.9336,
1102
+ "step": 780
1103
+ },
1104
+ {
1105
+ "epoch": 0.785,
1106
+ "grad_norm": 8.993667602539062,
1107
+ "learning_rate": 1.08e-05,
1108
+ "loss": 4.5752,
1109
+ "step": 785
1110
+ },
1111
+ {
1112
+ "epoch": 0.79,
1113
+ "grad_norm": 4.641674995422363,
1114
+ "learning_rate": 1.055e-05,
1115
+ "loss": 4.6423,
1116
+ "step": 790
1117
+ },
1118
+ {
1119
+ "epoch": 0.795,
1120
+ "grad_norm": 2.4793174266815186,
1121
+ "learning_rate": 1.03e-05,
1122
+ "loss": 4.2724,
1123
+ "step": 795
1124
+ },
1125
+ {
1126
+ "epoch": 0.8,
1127
+ "grad_norm": 5.038002014160156,
1128
+ "learning_rate": 1.005e-05,
1129
+ "loss": 4.56,
1130
+ "step": 800
1131
+ },
1132
+ {
1133
+ "epoch": 0.805,
1134
+ "grad_norm": 4.353021144866943,
1135
+ "learning_rate": 9.800000000000001e-06,
1136
+ "loss": 3.8578,
1137
+ "step": 805
1138
+ },
1139
+ {
1140
+ "epoch": 0.81,
1141
+ "grad_norm": 8.213798522949219,
1142
+ "learning_rate": 9.55e-06,
1143
+ "loss": 4.1989,
1144
+ "step": 810
1145
+ },
1146
+ {
1147
+ "epoch": 0.815,
1148
+ "grad_norm": 5.633493900299072,
1149
+ "learning_rate": 9.3e-06,
1150
+ "loss": 4.2873,
1151
+ "step": 815
1152
+ },
1153
+ {
1154
+ "epoch": 0.82,
1155
+ "grad_norm": 7.785790920257568,
1156
+ "learning_rate": 9.05e-06,
1157
+ "loss": 4.2596,
1158
+ "step": 820
1159
+ },
1160
+ {
1161
+ "epoch": 0.825,
1162
+ "grad_norm": 6.220232009887695,
1163
+ "learning_rate": 8.8e-06,
1164
+ "loss": 4.5386,
1165
+ "step": 825
1166
+ },
1167
+ {
1168
+ "epoch": 0.83,
1169
+ "grad_norm": 4.381409168243408,
1170
+ "learning_rate": 8.550000000000001e-06,
1171
+ "loss": 4.0956,
1172
+ "step": 830
1173
+ },
1174
+ {
1175
+ "epoch": 0.835,
1176
+ "grad_norm": 7.15064811706543,
1177
+ "learning_rate": 8.3e-06,
1178
+ "loss": 4.1738,
1179
+ "step": 835
1180
+ },
1181
+ {
1182
+ "epoch": 0.84,
1183
+ "grad_norm": 10.692646980285645,
1184
+ "learning_rate": 8.050000000000001e-06,
1185
+ "loss": 4.319,
1186
+ "step": 840
1187
+ },
1188
+ {
1189
+ "epoch": 0.845,
1190
+ "grad_norm": 6.923873424530029,
1191
+ "learning_rate": 7.8e-06,
1192
+ "loss": 4.1981,
1193
+ "step": 845
1194
+ },
1195
+ {
1196
+ "epoch": 0.85,
1197
+ "grad_norm": 11.210564613342285,
1198
+ "learning_rate": 7.55e-06,
1199
+ "loss": 3.8061,
1200
+ "step": 850
1201
+ },
1202
+ {
1203
+ "epoch": 0.855,
1204
+ "grad_norm": 4.295876502990723,
1205
+ "learning_rate": 7.2999999999999996e-06,
1206
+ "loss": 5.1272,
1207
+ "step": 855
1208
+ },
1209
+ {
1210
+ "epoch": 0.86,
1211
+ "grad_norm": 5.475194454193115,
1212
+ "learning_rate": 7.049999999999999e-06,
1213
+ "loss": 4.3979,
1214
+ "step": 860
1215
+ },
1216
+ {
1217
+ "epoch": 0.865,
1218
+ "grad_norm": 8.845963478088379,
1219
+ "learning_rate": 6.800000000000001e-06,
1220
+ "loss": 4.5577,
1221
+ "step": 865
1222
+ },
1223
+ {
1224
+ "epoch": 0.87,
1225
+ "grad_norm": 4.435886859893799,
1226
+ "learning_rate": 6.550000000000001e-06,
1227
+ "loss": 4.2463,
1228
+ "step": 870
1229
+ },
1230
+ {
1231
+ "epoch": 0.875,
1232
+ "grad_norm": 4.606530666351318,
1233
+ "learning_rate": 6.300000000000001e-06,
1234
+ "loss": 4.3915,
1235
+ "step": 875
1236
+ },
1237
+ {
1238
+ "epoch": 0.88,
1239
+ "grad_norm": 10.091827392578125,
1240
+ "learning_rate": 6.0500000000000005e-06,
1241
+ "loss": 3.8902,
1242
+ "step": 880
1243
+ },
1244
+ {
1245
+ "epoch": 0.885,
1246
+ "grad_norm": 8.739742279052734,
1247
+ "learning_rate": 5.8e-06,
1248
+ "loss": 4.2864,
1249
+ "step": 885
1250
+ },
1251
+ {
1252
+ "epoch": 0.89,
1253
+ "grad_norm": 6.266073226928711,
1254
+ "learning_rate": 5.55e-06,
1255
+ "loss": 3.8749,
1256
+ "step": 890
1257
+ },
1258
+ {
1259
+ "epoch": 0.895,
1260
+ "grad_norm": 6.344130516052246,
1261
+ "learning_rate": 5.3e-06,
1262
+ "loss": 3.9873,
1263
+ "step": 895
1264
+ },
1265
+ {
1266
+ "epoch": 0.9,
1267
+ "grad_norm": 5.76676082611084,
1268
+ "learning_rate": 5.050000000000001e-06,
1269
+ "loss": 4.7841,
1270
+ "step": 900
1271
+ },
1272
+ {
1273
+ "epoch": 0.905,
1274
+ "grad_norm": 9.59454345703125,
1275
+ "learning_rate": 4.800000000000001e-06,
1276
+ "loss": 3.959,
1277
+ "step": 905
1278
+ },
1279
+ {
1280
+ "epoch": 0.91,
1281
+ "grad_norm": 12.02519416809082,
1282
+ "learning_rate": 4.5500000000000005e-06,
1283
+ "loss": 4.4615,
1284
+ "step": 910
1285
+ },
1286
+ {
1287
+ "epoch": 0.915,
1288
+ "grad_norm": 8.733320236206055,
1289
+ "learning_rate": 4.2999999999999995e-06,
1290
+ "loss": 3.8876,
1291
+ "step": 915
1292
+ },
1293
+ {
1294
+ "epoch": 0.92,
1295
+ "grad_norm": 5.37556791305542,
1296
+ "learning_rate": 4.05e-06,
1297
+ "loss": 4.469,
1298
+ "step": 920
1299
+ },
1300
+ {
1301
+ "epoch": 0.925,
1302
+ "grad_norm": 4.086933135986328,
1303
+ "learning_rate": 3.8e-06,
1304
+ "loss": 4.1404,
1305
+ "step": 925
1306
+ },
1307
+ {
1308
+ "epoch": 0.93,
1309
+ "grad_norm": 9.399847984313965,
1310
+ "learning_rate": 3.55e-06,
1311
+ "loss": 4.5059,
1312
+ "step": 930
1313
+ },
1314
+ {
1315
+ "epoch": 0.935,
1316
+ "grad_norm": 5.258869647979736,
1317
+ "learning_rate": 3.3e-06,
1318
+ "loss": 4.1027,
1319
+ "step": 935
1320
+ },
1321
+ {
1322
+ "epoch": 0.94,
1323
+ "grad_norm": 4.01800537109375,
1324
+ "learning_rate": 3.05e-06,
1325
+ "loss": 4.5203,
1326
+ "step": 940
1327
+ },
1328
+ {
1329
+ "epoch": 0.945,
1330
+ "grad_norm": 2.5132651329040527,
1331
+ "learning_rate": 2.8000000000000003e-06,
1332
+ "loss": 3.9156,
1333
+ "step": 945
1334
+ },
1335
+ {
1336
+ "epoch": 0.95,
1337
+ "grad_norm": 13.908726692199707,
1338
+ "learning_rate": 2.55e-06,
1339
+ "loss": 3.7899,
1340
+ "step": 950
1341
+ },
1342
+ {
1343
+ "epoch": 0.955,
1344
+ "grad_norm": 2.58840012550354,
1345
+ "learning_rate": 2.3e-06,
1346
+ "loss": 4.5708,
1347
+ "step": 955
1348
+ },
1349
+ {
1350
+ "epoch": 0.96,
1351
+ "grad_norm": 4.768102169036865,
1352
+ "learning_rate": 2.0500000000000003e-06,
1353
+ "loss": 4.4316,
1354
+ "step": 960
1355
+ },
1356
+ {
1357
+ "epoch": 0.965,
1358
+ "grad_norm": 4.838945388793945,
1359
+ "learning_rate": 1.8e-06,
1360
+ "loss": 5.1041,
1361
+ "step": 965
1362
+ },
1363
+ {
1364
+ "epoch": 0.97,
1365
+ "grad_norm": 10.673234939575195,
1366
+ "learning_rate": 1.55e-06,
1367
+ "loss": 3.9105,
1368
+ "step": 970
1369
+ },
1370
+ {
1371
+ "epoch": 0.975,
1372
+ "grad_norm": 3.4578492641448975,
1373
+ "learning_rate": 1.3e-06,
1374
+ "loss": 4.7147,
1375
+ "step": 975
1376
+ },
1377
+ {
1378
+ "epoch": 0.98,
1379
+ "grad_norm": 2.634669065475464,
1380
+ "learning_rate": 1.0500000000000001e-06,
1381
+ "loss": 3.9731,
1382
+ "step": 980
1383
+ },
1384
+ {
1385
+ "epoch": 0.985,
1386
+ "grad_norm": 5.2007904052734375,
1387
+ "learning_rate": 8.000000000000001e-07,
1388
+ "loss": 3.6674,
1389
+ "step": 985
1390
+ },
1391
+ {
1392
+ "epoch": 0.99,
1393
+ "grad_norm": 9.363544464111328,
1394
+ "learning_rate": 5.5e-07,
1395
+ "loss": 3.8802,
1396
+ "step": 990
1397
+ },
1398
+ {
1399
+ "epoch": 0.995,
1400
+ "grad_norm": 16.156009674072266,
1401
+ "learning_rate": 3.0000000000000004e-07,
1402
+ "loss": 4.784,
1403
+ "step": 995
1404
+ },
1405
+ {
1406
+ "epoch": 1.0,
1407
+ "grad_norm": 12.3667631149292,
1408
+ "learning_rate": 5.0000000000000004e-08,
1409
+ "loss": 3.9393,
1410
+ "step": 1000
1411
+ }
1412
+ ],
1413
+ "logging_steps": 5,
1414
+ "max_steps": 1000,
1415
+ "num_input_tokens_seen": 0,
1416
+ "num_train_epochs": 1,
1417
+ "save_steps": 25,
1418
+ "stateful_callbacks": {
1419
+ "TrainerControl": {
1420
+ "args": {
1421
+ "should_epoch_stop": false,
1422
+ "should_evaluate": false,
1423
+ "should_log": false,
1424
+ "should_save": true,
1425
+ "should_training_stop": true
1426
+ },
1427
+ "attributes": {}
1428
+ }
1429
+ },
1430
+ "total_flos": 340040613888000.0,
1431
+ "train_batch_size": 1,
1432
+ "trial_name": null,
1433
+ "trial_params": null
1434
+ }
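Because trainer_state.json keeps the full log_history (loss and learning rate every 5 steps across all 1000 steps), the training curve can be reconstructed straight from the checkpoint without re-running anything. A minimal sketch, with the local path an assumption:

```python
import json

with open("just_lora_weights/checkpoint-1000/trainer_state.json") as f:  # path assumed
    state = json.load(f)

entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

print(f"global_step={state['global_step']}, epoch={state['epoch']}")
print(f"first logged loss: {losses[0]:.3f}, last logged loss: {losses[-1]:.3f}")
```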
just_lora_weights/checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b32254dfaf0c82ecfc54cd31b94eb7c8ebb2e7d832a31fe43128bcff0fa132b3
3
+ size 5713
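training_args.bin is the pickled TrainingArguments object that the transformers Trainer stores next to every checkpoint (kept here as a Git LFS pointer). It is not a weights file, so recent torch releases need weights_only=False to unpickle it; a sketch, with the path again an assumption:

```python
import torch

# Pickled transformers.TrainingArguments, not tensors: weights_only=False is required
# on newer torch versions and should only be used for files you trust.
args = torch.load("just_lora_weights/checkpoint-1000/training_args.bin", weights_only=False)
print(args.learning_rate, args.max_steps, args.save_steps, args.logging_steps)
```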
just_lora_weights/checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
just_lora_weights/checkpoint-125/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: Qwen/Qwen3-0.6B
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.15.2
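The auto-generated card only records the framework version, PEFT 0.15.2; when reusing these adapters it is worth confirming that the installed peft is at least that release, e.g.:

```python
import peft

print(peft.__version__)  # the checkpoints in this commit were written with PEFT 0.15.2
```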
just_lora_weights/checkpoint-125/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen3-0.6B",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 16,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.05,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "r": 16,
24
+ "rank_pattern": {},
25
+ "revision": null,
26
+ "target_modules": [
27
+ "v_proj",
28
+ "q_proj"
29
+ ],
30
+ "task_type": "CAUSAL_LM",
31
+ "trainable_token_indices": null,
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
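adapter_config.json describes a rank-16 LoRA (alpha 16, dropout 0.05) applied to the q_proj and v_proj projections of Qwen/Qwen3-0.6B, so any checkpoint directory in this commit can be attached to the base model directly. A minimal sketch, with the local checkpoint path as an assumption:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-0.6B")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")

# Attach the LoRA adapter stored in this checkpoint (path is illustrative).
model = PeftModel.from_pretrained(base, "just_lora_weights/checkpoint-125")
model.eval()

# Optionally fold the adapter into the base weights for deployment:
# merged = model.merge_and_unload()
```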
just_lora_weights/checkpoint-125/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16989a57d57a2a094e9e21d95bc7623080bd03ae7fd388f5f5ac76345c957c71
3
+ size 9189904
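The ~9 MB adapter_model.safetensors holds only the LoRA A/B matrices for the targeted projections and can be inspected without building the model at all, e.g. via the safetensors API (path assumed):

```python
from safetensors import safe_open

path = "just_lora_weights/checkpoint-125/adapter_model.safetensors"  # path assumed
with safe_open(path, framework="pt") as f:
    for name in list(f.keys())[:4]:
        print(name, tuple(f.get_tensor(name).shape))
```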
just_lora_weights/checkpoint-125/added_tokens.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "</think>": 151668,
3
+ "</tool_call>": 151658,
4
+ "</tool_response>": 151666,
5
+ "<think>": 151667,
6
+ "<tool_call>": 151657,
7
+ "<tool_response>": 151665,
8
+ "<|box_end|>": 151649,
9
+ "<|box_start|>": 151648,
10
+ "<|endoftext|>": 151643,
11
+ "<|file_sep|>": 151664,
12
+ "<|fim_middle|>": 151660,
13
+ "<|fim_pad|>": 151662,
14
+ "<|fim_prefix|>": 151659,
15
+ "<|fim_suffix|>": 151661,
16
+ "<|im_end|>": 151645,
17
+ "<|im_start|>": 151644,
18
+ "<|image_pad|>": 151655,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
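added_tokens.json pins the Qwen3 control tokens to fixed IDs (151643–151668); the same mapping is available from the loaded tokenizer, which is a quick way to confirm that a checkpoint's vocabulary matches the base model:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("just_lora_weights/checkpoint-125")  # path assumed
for tok in ["<|im_start|>", "<|im_end|>", "<think>", "</think>"]:
    print(tok, tokenizer.convert_tokens_to_ids(tok))
# expected IDs per the file above: 151644, 151645, 151667, 151668
```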
just_lora_weights/checkpoint-125/chat_template.jinja ADDED
@@ -0,0 +1,89 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- endif %}
6
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
7
+ {%- for tool in tools %}
8
+ {{- "\n" }}
9
+ {{- tool | tojson }}
10
+ {%- endfor %}
11
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
12
+ {%- else %}
13
+ {%- if messages[0].role == 'system' %}
14
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
15
+ {%- endif %}
16
+ {%- endif %}
17
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
18
+ {%- for message in messages[::-1] %}
19
+ {%- set index = (messages|length - 1) - loop.index0 %}
20
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
21
+ {%- set ns.multi_step_tool = false %}
22
+ {%- set ns.last_query_index = index %}
23
+ {%- endif %}
24
+ {%- endfor %}
25
+ {%- for message in messages %}
26
+ {%- if message.content is string %}
27
+ {%- set content = message.content %}
28
+ {%- else %}
29
+ {%- set content = '' %}
30
+ {%- endif %}
31
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
32
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
33
+ {%- elif message.role == "assistant" %}
34
+ {%- set reasoning_content = '' %}
35
+ {%- if message.reasoning_content is string %}
36
+ {%- set reasoning_content = message.reasoning_content %}
37
+ {%- else %}
38
+ {%- if '</think>' in content %}
39
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
40
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
41
+ {%- endif %}
42
+ {%- endif %}
43
+ {%- if loop.index0 > ns.last_query_index %}
44
+ {%- if loop.last or (not loop.last and reasoning_content) %}
45
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
46
+ {%- else %}
47
+ {{- '<|im_start|>' + message.role + '\n' + content }}
48
+ {%- endif %}
49
+ {%- else %}
50
+ {{- '<|im_start|>' + message.role + '\n' + content }}
51
+ {%- endif %}
52
+ {%- if message.tool_calls %}
53
+ {%- for tool_call in message.tool_calls %}
54
+ {%- if (loop.first and content) or (not loop.first) %}
55
+ {{- '\n' }}
56
+ {%- endif %}
57
+ {%- if tool_call.function %}
58
+ {%- set tool_call = tool_call.function %}
59
+ {%- endif %}
60
+ {{- '<tool_call>\n{"name": "' }}
61
+ {{- tool_call.name }}
62
+ {{- '", "arguments": ' }}
63
+ {%- if tool_call.arguments is string %}
64
+ {{- tool_call.arguments }}
65
+ {%- else %}
66
+ {{- tool_call.arguments | tojson }}
67
+ {%- endif %}
68
+ {{- '}\n</tool_call>' }}
69
+ {%- endfor %}
70
+ {%- endif %}
71
+ {{- '<|im_end|>\n' }}
72
+ {%- elif message.role == "tool" %}
73
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
74
+ {{- '<|im_start|>user' }}
75
+ {%- endif %}
76
+ {{- '\n<tool_response>\n' }}
77
+ {{- content }}
78
+ {{- '\n</tool_response>' }}
79
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
80
+ {{- '<|im_end|>\n' }}
81
+ {%- endif %}
82
+ {%- endif %}
83
+ {%- endfor %}
84
+ {%- if add_generation_prompt %}
85
+ {{- '<|im_start|>assistant\n' }}
86
+ {%- if enable_thinking is defined and enable_thinking is false %}
87
+ {{- '<think>\n\n</think>\n\n' }}
88
+ {%- endif %}
89
+ {%- endif %}
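chat_template.jinja is the stock Qwen3 chat template, including the enable_thinking switch that, when set to false, emits an empty <think></think> block so the model skips its reasoning trace. It is applied through tokenizer.apply_chat_template; a sketch with an assumed local path:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("just_lora_weights/checkpoint-125")  # path assumed

messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,  # forwarded to the template; emits the empty <think> block
)
print(prompt)
```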
just_lora_weights/checkpoint-125/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
just_lora_weights/checkpoint-125/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3634cb502d1afe6a7c00dcec8edf677bba32f799d7ea239196c3e207ca597f2f
3
+ size 18441675
just_lora_weights/checkpoint-125/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cddf27219365242ec1046a3532a63a24c3f350c77f100e4f973369db2cc849d
3
+ size 14455
just_lora_weights/checkpoint-125/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16a590e239778b85b9e0b27bdd95750c3cda5ceffcdcf69f7189eedfb1880ec0
3
+ size 1465
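optimizer.pt, scheduler.pt and rng_state.pth are what allow the Trainer to resume an interrupted run from this directory (via trainer.train(resume_from_checkpoint=...)); they can also be inspected on their own. A sketch, paths assumed:

```python
import torch

ckpt = "just_lora_weights/checkpoint-125"  # path assumed

# Both files are pickled Python objects, hence weights_only=False (trusted files only).
opt_state = torch.load(f"{ckpt}/optimizer.pt", map_location="cpu", weights_only=False)
rng_state = torch.load(f"{ckpt}/rng_state.pth", map_location="cpu", weights_only=False)

print(type(opt_state))           # optimizer state_dict saved by the Trainer
print(sorted(rng_state.keys()))  # per-library RNG states captured at save time
```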
just_lora_weights/checkpoint-125/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
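special_tokens_map.json fixes <|im_end|> as EOS and <|endoftext|> as padding, which is worth passing explicitly when generating so that decoding stops and pads on the right tokens. Continuing from the PEFT loading sketch above (model and tokenizer reused from there):

```python
inputs = tokenizer("Hello!", return_tensors="pt")
out = model.generate(
    **inputs,
    max_new_tokens=32,
    eos_token_id=tokenizer.eos_token_id,  # <|im_end|> (151645)
    pad_token_id=tokenizer.pad_token_id,  # <|endoftext|> (151643)
)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```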
just_lora_weights/checkpoint-125/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
3
+ size 11422654
just_lora_weights/checkpoint-125/tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 131072,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
just_lora_weights/checkpoint-125/trainer_state.json ADDED
@@ -0,0 +1,209 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 0.125,
6
+ "eval_steps": 500,
7
+ "global_step": 125,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.005,
14
+ "grad_norm": 4.4095964431762695,
15
+ "learning_rate": 4.9800000000000004e-05,
16
+ "loss": 5.4329,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.01,
21
+ "grad_norm": 2.658348798751831,
22
+ "learning_rate": 4.9550000000000005e-05,
23
+ "loss": 4.6734,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.015,
28
+ "grad_norm": 3.4242470264434814,
29
+ "learning_rate": 4.93e-05,
30
+ "loss": 4.8327,
31
+ "step": 15
32
+ },
33
+ {
34
+ "epoch": 0.02,
35
+ "grad_norm": 4.363213539123535,
36
+ "learning_rate": 4.905e-05,
37
+ "loss": 4.4238,
38
+ "step": 20
39
+ },
40
+ {
41
+ "epoch": 0.025,
42
+ "grad_norm": 2.531619071960449,
43
+ "learning_rate": 4.88e-05,
44
+ "loss": 3.9284,
45
+ "step": 25
46
+ },
47
+ {
48
+ "epoch": 0.03,
49
+ "grad_norm": 4.686695098876953,
50
+ "learning_rate": 4.855e-05,
51
+ "loss": 4.6794,
52
+ "step": 30
53
+ },
54
+ {
55
+ "epoch": 0.035,
56
+ "grad_norm": 4.860099792480469,
57
+ "learning_rate": 4.83e-05,
58
+ "loss": 5.0624,
59
+ "step": 35
60
+ },
61
+ {
62
+ "epoch": 0.04,
63
+ "grad_norm": 8.296855926513672,
64
+ "learning_rate": 4.805e-05,
65
+ "loss": 4.8011,
66
+ "step": 40
67
+ },
68
+ {
69
+ "epoch": 0.045,
70
+ "grad_norm": 4.80878210067749,
71
+ "learning_rate": 4.78e-05,
72
+ "loss": 5.495,
73
+ "step": 45
74
+ },
75
+ {
76
+ "epoch": 0.05,
77
+ "grad_norm": 4.766687393188477,
78
+ "learning_rate": 4.755e-05,
79
+ "loss": 4.6778,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 0.055,
84
+ "grad_norm": 2.7137186527252197,
85
+ "learning_rate": 4.73e-05,
86
+ "loss": 4.5044,
87
+ "step": 55
88
+ },
89
+ {
90
+ "epoch": 0.06,
91
+ "grad_norm": 9.891243934631348,
92
+ "learning_rate": 4.705e-05,
93
+ "loss": 4.4744,
94
+ "step": 60
95
+ },
96
+ {
97
+ "epoch": 0.065,
98
+ "grad_norm": 3.6516237258911133,
99
+ "learning_rate": 4.6800000000000006e-05,
100
+ "loss": 4.6576,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.07,
105
+ "grad_norm": 5.687813758850098,
106
+ "learning_rate": 4.655000000000001e-05,
107
+ "loss": 5.1204,
108
+ "step": 70
109
+ },
110
+ {
111
+ "epoch": 0.075,
112
+ "grad_norm": 4.273693561553955,
113
+ "learning_rate": 4.630000000000001e-05,
114
+ "loss": 5.361,
115
+ "step": 75
116
+ },
117
+ {
118
+ "epoch": 0.08,
119
+ "grad_norm": 6.802962779998779,
120
+ "learning_rate": 4.605e-05,
121
+ "loss": 5.5483,
122
+ "step": 80
123
+ },
124
+ {
125
+ "epoch": 0.085,
126
+ "grad_norm": 2.7016360759735107,
127
+ "learning_rate": 4.58e-05,
128
+ "loss": 4.5909,
129
+ "step": 85
130
+ },
131
+ {
132
+ "epoch": 0.09,
133
+ "grad_norm": 8.201666831970215,
134
+ "learning_rate": 4.555e-05,
135
+ "loss": 4.2807,
136
+ "step": 90
137
+ },
138
+ {
139
+ "epoch": 0.095,
140
+ "grad_norm": 3.8271970748901367,
141
+ "learning_rate": 4.53e-05,
142
+ "loss": 5.5645,
143
+ "step": 95
144
+ },
145
+ {
146
+ "epoch": 0.1,
147
+ "grad_norm": 3.17282772064209,
148
+ "learning_rate": 4.5050000000000004e-05,
149
+ "loss": 4.5845,
150
+ "step": 100
151
+ },
152
+ {
153
+ "epoch": 0.105,
154
+ "grad_norm": 2.9256932735443115,
155
+ "learning_rate": 4.4800000000000005e-05,
156
+ "loss": 4.6003,
157
+ "step": 105
158
+ },
159
+ {
160
+ "epoch": 0.11,
161
+ "grad_norm": 10.4873046875,
162
+ "learning_rate": 4.4550000000000005e-05,
163
+ "loss": 4.6453,
164
+ "step": 110
165
+ },
166
+ {
167
+ "epoch": 0.115,
168
+ "grad_norm": 2.9158883094787598,
169
+ "learning_rate": 4.43e-05,
170
+ "loss": 4.8935,
171
+ "step": 115
172
+ },
173
+ {
174
+ "epoch": 0.12,
175
+ "grad_norm": 3.5588793754577637,
176
+ "learning_rate": 4.405e-05,
177
+ "loss": 3.9582,
178
+ "step": 120
179
+ },
180
+ {
181
+ "epoch": 0.125,
182
+ "grad_norm": 2.7040491104125977,
183
+ "learning_rate": 4.38e-05,
184
+ "loss": 4.6688,
185
+ "step": 125
186
+ }
187
+ ],
188
+ "logging_steps": 5,
189
+ "max_steps": 1000,
190
+ "num_input_tokens_seen": 0,
191
+ "num_train_epochs": 1,
192
+ "save_steps": 25,
193
+ "stateful_callbacks": {
194
+ "TrainerControl": {
195
+ "args": {
196
+ "should_epoch_stop": false,
197
+ "should_evaluate": false,
198
+ "should_log": false,
199
+ "should_save": true,
200
+ "should_training_stop": false
201
+ },
202
+ "attributes": {}
203
+ }
204
+ },
205
+ "total_flos": 42505076736000.0,
206
+ "train_batch_size": 1,
207
+ "trial_name": null,
208
+ "trial_params": null
209
+ }
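The learning rates in the log above drop by 2.5e-07 every 5 steps, which is consistent with a plain linear decay over the run's 1000 `max_steps`. As a quick, hypothetical sanity check (the initial learning rate of 5e-05 is inferred from the logged values, not stated in this file), the logged entries can be reproduced like this:

```python
# Hypothetical reconstruction of the schedule behind the logged learning rates.
# Assumption: linear decay from 5e-05 over max_steps=1000, with the value logged
# at step s reflecting the scheduler after s-1 optimizer updates.
logged = {5: 4.98e-05, 50: 4.755e-05, 100: 4.505e-05, 125: 4.38e-05}  # from the log above
initial_lr, max_steps = 5e-05, 1000  # initial_lr is assumed; max_steps comes from this file

for step, lr in logged.items():
    expected = initial_lr * (max_steps - (step - 1)) / max_steps
    assert abs(expected - lr) < 1e-12, (step, expected, lr)
print("logged learning rates match a linear schedule from 5e-05")
```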
just_lora_weights/checkpoint-125/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b32254dfaf0c82ecfc54cd31b94eb7c8ebb2e7d832a31fe43128bcff0fa132b3
+ size 5713
just_lora_weights/checkpoint-125/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
just_lora_weights/checkpoint-150/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: Qwen/Qwen3-0.6B
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.15.2
just_lora_weights/checkpoint-150/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen3-0.6B",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
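For orientation, the configuration above is a standard PEFT LoRA setup: rank 16, alpha 16, dropout 0.05, applied to the q_proj and v_proj attention modules of Qwen/Qwen3-0.6B. A minimal loading sketch follows; it is not part of this commit, the checkpoint path is taken from the file list, and it assumes the repository has been cloned locally with `transformers` and `peft` installed:

```python
# Minimal sketch: attach the saved LoRA adapter to the base model with peft.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, PeftModel

# Mirrors adapter_config.json above; PeftModel.from_pretrained reads the saved
# config itself, so this object is shown only to make the settings explicit.
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-0.6B")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")

# Checkpoint directory name taken from this commit's file list;
# assumes the script runs from the repository root.
model = PeftModel.from_pretrained(base, "just_lora_weights/checkpoint-150")
model.eval()
```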
just_lora_weights/checkpoint-150/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d7c9a8b78e36adff84b178dbe829e123eaa9ea5dd689cc0b7fc4975d8dc4d9f
+ size 9189904
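The ~9.2 MB pointer size above is roughly what an r=16 q_proj/v_proj adapter for a model of this scale should weigh. A back-of-the-envelope check, with the caveat that the architecture numbers below are assumptions about Qwen3-0.6B (28 layers, hidden size 1024, 2048-dim query projection, 1024-dim value projection) rather than facts stated in this commit:

```python
# Rough size check for adapter_model.safetensors; all dimensions are assumed, not from this diff.
layers, hidden, r = 28, 1024, 16
q_out, v_out = 2048, 1024            # assumed Qwen3-0.6B projection widths (head_dim 128)

per_layer = r * (hidden + q_out) + r * (hidden + v_out)  # LoRA A + B matrices for q_proj and v_proj
params = layers * per_layer                              # 2,293,760 parameters
print(params * 4)  # ~9,175,040 bytes in fp32, close to the 9,189,904 reported (rest is file header)
```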
just_lora_weights/checkpoint-150/added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "</think>": 151668,
+ "</tool_call>": 151658,
+ "</tool_response>": 151666,
+ "<think>": 151667,
+ "<tool_call>": 151657,
+ "<tool_response>": 151665,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
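If needed, the id mapping above can be cross-checked against the tokenizer files shipped in the same checkpoint. A hedged illustration, again assuming the repository has been cloned locally (the checkpoint path comes from this commit's file list):

```python
# Spot-check a few added-token ids against the checkpoint's tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("just_lora_weights/checkpoint-150")
assert tok.convert_tokens_to_ids("<think>") == 151667
assert tok.convert_tokens_to_ids("</think>") == 151668
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
```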