Upload folder using huggingface_hub
- .gitattributes +1 -0
- added_tokens.json +24 -0
- args.json +366 -0
- config.json +30 -0
- generation_config.json +6 -0
- merges.txt +0 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +346 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +208 -0
- trainer_state.json +334 -0
- training_args.bin +3 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
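The commit message says the folder was pushed with the `huggingface_hub` client, and the new `.gitattributes` rule marks the large `tokenizer.json` as LFS-tracked. A minimal sketch of such an upload; the `folder_path` and `repo_id` below are hypothetical placeholders, not values taken from this commit:

```python
# Sketch of an upload like the one behind this commit. folder_path and repo_id
# are hypothetical placeholders; only the commit message is from this page.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="output_models_dsr_0514_p0/v0-20250515-025537",  # hypothetical local dir
    repo_id="user/Qwen2.5-Math-7B-dsr-sft",                      # hypothetical target repo
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```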
added_tokens.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "</tool_call>": 151658,
+  "<tool_call>": 151657,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
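These are Qwen2.5's extra special tokens (ChatML markers, tool-call tags, FIM and vision placeholders) occupying IDs 151643–151664 at the top of the 152064-entry vocabulary. A quick sanity-check sketch; "path/to/checkpoint" is a placeholder for a local clone of this repo:

```python
# Sketch: confirm the tokenizer resolves the added tokens to the IDs above.
# "path/to/checkpoint" is a placeholder, not a path from this commit.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkpoint")
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tok.convert_tokens_to_ids("<|im_start|>") == 151644
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
```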
args.json
ADDED
@@ -0,0 +1,366 @@
+{
+  "model": "/data/yubo/models/Qwen2.5-Math-7B",
+  "model_type": "qwen2_5_math",
+  "model_revision": null,
+  "task_type": "causal_lm",
+  "torch_dtype": "bfloat16",
+  "attn_impl": "flash_attn",
+  "num_labels": null,
+  "problem_type": null,
+  "rope_scaling": null,
+  "device_map": null,
+  "max_memory": {},
+  "local_repo_path": null,
+  "template": "qwen2_5_math",
+  "system": null,
+  "max_length": null,
+  "truncation_strategy": "delete",
+  "max_pixels": null,
+  "agent_template": null,
+  "norm_bbox": null,
+  "response_prefix": null,
+  "padding_side": "right",
+  "loss_scale": "default",
+  "sequence_parallel_size": 1,
+  "use_chat_template": true,
+  "template_backend": "swift",
+  "dataset": [
+    "/data/yubo/CriticCoT/dsr_scripts_0514/dsr_one-shot_train_data_0514_p0.jsonl"
+  ],
+  "val_dataset": [],
+  "split_dataset_ratio": 0.0,
+  "data_seed": 42,
+  "dataset_num_proc": 4,
+  "dataset_shuffle": true,
+  "val_dataset_shuffle": false,
+  "streaming": false,
+  "interleave_prob": null,
+  "stopping_strategy": "first_exhausted",
+  "shuffle_buffer_size": 1000,
+  "enable_cache": false,
+  "download_mode": "reuse_dataset_if_exists",
+  "columns": {},
+  "strict": false,
+  "remove_unused_columns": false,
+  "model_name": [
+    null,
+    null
+  ],
+  "model_author": [
+    null,
+    null
+  ],
+  "custom_dataset_info": [],
+  "quant_method": null,
+  "quant_bits": null,
+  "hqq_axis": null,
+  "bnb_4bit_compute_dtype": "bfloat16",
+  "bnb_4bit_quant_type": "nf4",
+  "bnb_4bit_use_double_quant": true,
+  "bnb_4bit_quant_storage": null,
+  "max_new_tokens": 64,
+  "temperature": 0.0,
+  "top_k": null,
+  "top_p": null,
+  "repetition_penalty": null,
+  "num_beams": 1,
+  "stream": false,
+  "stop_words": [],
+  "logprobs": false,
+  "top_logprobs": null,
+  "ckpt_dir": null,
+  "load_dataset_config": null,
+  "lora_modules": [],
+  "tuner_backend": "peft",
+  "train_type": "full",
+  "adapters": [],
+  "external_plugins": [],
+  "seed": 42,
+  "model_kwargs": {},
+  "load_args": false,
+  "load_data_args": false,
+  "use_hf": true,
+  "hub_token": null,
+  "custom_register_path": [],
+  "ignore_args_error": false,
+  "use_swift_lora": false,
+  "output_dir": "/data/yubo/CriticCoT/ms-swift/output_models_dsr_0514_p0/v0-20250515-025537",
+  "overwrite_output_dir": false,
+  "do_train": false,
+  "do_eval": false,
+  "do_predict": false,
+  "eval_strategy": "steps",
+  "prediction_loss_only": false,
+  "per_device_train_batch_size": 1,
+  "per_device_eval_batch_size": 1,
+  "per_gpu_train_batch_size": null,
+  "per_gpu_eval_batch_size": null,
+  "gradient_accumulation_steps": 128,
+  "eval_accumulation_steps": null,
+  "eval_delay": 0,
+  "torch_empty_cache_steps": null,
+  "learning_rate": 5e-06,
+  "weight_decay": 0.05,
+  "adam_beta1": 0.9,
+  "adam_beta2": 0.95,
+  "adam_epsilon": 1e-08,
+  "max_grad_norm": 1.0,
+  "num_train_epochs": 40.0,
+  "max_steps": -1,
+  "lr_scheduler_type": "cosine",
+  "lr_scheduler_kwargs": null,
+  "warmup_ratio": 0.2,
+  "warmup_steps": 0,
+  "log_level": "passive",
+  "log_level_replica": "warning",
+  "log_on_each_node": true,
+  "logging_dir": "/data/yubo/CriticCoT/ms-swift/output_models_dsr_0514_p0/v0-20250515-025537/runs",
+  "logging_strategy": "steps",
+  "logging_first_step": true,
+  "logging_steps": 1,
+  "logging_nan_inf_filter": true,
+  "save_strategy": "steps",
+  "save_steps": 2.0,
+  "save_total_limit": null,
+  "save_safetensors": true,
+  "save_on_each_node": false,
+  "save_only_model": true,
+  "restore_callback_states_from_checkpoint": false,
+  "no_cuda": false,
+  "use_cpu": false,
+  "use_mps_device": false,
+  "jit_mode_eval": false,
+  "use_ipex": false,
+  "bf16": true,
+  "fp16": false,
+  "fp16_opt_level": "O1",
+  "half_precision_backend": "auto",
+  "bf16_full_eval": false,
+  "fp16_full_eval": false,
+  "tf32": null,
+  "local_rank": 0,
+  "ddp_backend": "nccl",
+  "tpu_num_cores": null,
+  "tpu_metrics_debug": false,
+  "debug": null,
+  "dataloader_drop_last": false,
+  "eval_steps": 2.0,
+  "dataloader_num_workers": 4,
+  "dataloader_prefetch_factor": null,
+  "past_index": -1,
+  "run_name": "/data/yubo/CriticCoT/ms-swift/output_models_dsr_0514_p0/v0-20250515-025537",
+  "disable_tqdm": null,
+  "label_names": null,
+  "load_best_model_at_end": false,
+  "metric_for_best_model": "loss",
+  "greater_is_better": false,
+  "ignore_data_skip": false,
+  "fsdp": "",
+  "fsdp_min_num_params": 0,
+  "fsdp_config": null,
+  "tp_size": 0,
+  "fsdp_transformer_layer_cls_to_wrap": null,
+  "accelerator_config": {
+    "dispatch_batches": false
+  },
+  "deepspeed": {
+    "fp16": {
+      "enabled": "auto",
+      "loss_scale": 0,
+      "loss_scale_window": 1000,
+      "initial_scale_power": 16,
+      "hysteresis": 2,
+      "min_loss_scale": 1
+    },
+    "bf16": {
+      "enabled": "auto"
+    },
+    "zero_optimization": {
+      "stage": 3,
+      "offload_optimizer": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "offload_param": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "overlap_comm": false,
+      "contiguous_gradients": true,
+      "sub_group_size": 1000000000.0,
+      "reduce_bucket_size": "auto",
+      "zero_quantized_weights": false,
+      "zero_quantized_gradients": false,
+      "stage3_prefetch_bucket_size": "auto",
+      "stage3_param_persistence_threshold": "auto",
+      "stage3_max_live_parameters": 1000000000.0,
+      "stage3_max_reuse_distance": 1000000000.0,
+      "stage3_gather_16bit_weights_on_model_save": true
+    },
+    "gradient_accumulation_steps": "auto",
+    "gradient_clipping": "auto",
+    "steps_per_print": 2000,
+    "train_batch_size": "auto",
+    "train_micro_batch_size_per_gpu": "auto",
+    "wall_clock_breakdown": false
+  },
+  "label_smoothing_factor": 0.0,
+  "optim": "adamw_torch",
+  "optim_args": null,
+  "adafactor": false,
+  "group_by_length": false,
+  "length_column_name": "length",
+  "report_to": [
+    "wandb"
+  ],
+  "ddp_find_unused_parameters": null,
+  "ddp_bucket_cap_mb": null,
+  "ddp_broadcast_buffers": null,
+  "dataloader_pin_memory": true,
+  "dataloader_persistent_workers": false,
+  "skip_memory_metrics": true,
+  "use_legacy_prediction_loop": false,
+  "push_to_hub": false,
+  "resume_from_checkpoint": null,
+  "hub_model_id": null,
+  "hub_strategy": "every_save",
+  "hub_private_repo": null,
+  "hub_always_push": false,
+  "gradient_checkpointing": true,
+  "gradient_checkpointing_kwargs": null,
+  "include_inputs_for_metrics": false,
+  "include_for_metrics": [],
+  "eval_do_concat_batches": true,
+  "fp16_backend": "auto",
+  "push_to_hub_model_id": null,
+  "push_to_hub_organization": null,
+  "push_to_hub_token": null,
+  "mp_parameters": "",
+  "auto_find_batch_size": false,
+  "full_determinism": false,
+  "torchdynamo": null,
+  "ray_scope": "last",
+  "ddp_timeout": 1800,
+  "torch_compile": false,
+  "torch_compile_backend": null,
+  "torch_compile_mode": null,
+  "include_tokens_per_second": false,
+  "include_num_input_tokens_seen": false,
+  "neftune_noise_alpha": null,
+  "optim_target_modules": null,
+  "batch_eval_metrics": false,
+  "eval_on_start": false,
+  "use_liger_kernel": false,
+  "eval_use_gather_object": false,
+  "average_tokens_across_devices": false,
+  "sortish_sampler": false,
+  "predict_with_generate": false,
+  "generation_max_length": null,
+  "generation_num_beams": null,
+  "generation_config": null,
+  "check_model": true,
+  "acc_strategy": "token",
+  "train_dataloader_shuffle": true,
+  "metric_warmup_step": 0,
+  "fsdp_num": 1,
+  "acc_steps": 1,
+  "eval_use_evalscope": false,
+  "eval_datasets": [],
+  "eval_limit": null,
+  "eval_datasets_args": null,
+  "eval_generation_config": null,
+  "freeze_parameters": [],
+  "freeze_parameters_ratio": 0.0,
+  "trainable_parameters": [],
+  "freeze_llm": false,
+  "freeze_vit": false,
+  "freeze_aligner": false,
+  "target_modules": [
+    "all-linear"
+  ],
+  "target_regex": null,
+  "modules_to_save": [],
+  "lora_rank": 8,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "lora_bias": "none",
+  "lora_dtype": null,
+  "lorap_lr_ratio": null,
+  "use_rslora": false,
+  "use_dora": false,
+  "lora_ga_batch_size": 2,
+  "lora_ga_iters": 2,
+  "lora_ga_max_length": 1024,
+  "lora_ga_direction": "ArB2r",
+  "lora_ga_scale": "stable",
+  "lora_ga_stable_gamma": 16,
+  "init_weights": true,
+  "fourier_n_frequency": 2000,
+  "fourier_scaling": 300.0,
+  "boft_block_size": 4,
+  "boft_block_num": 0,
+  "boft_n_butterfly_factor": 1,
+  "boft_dropout": 0.0,
+  "vera_rank": 256,
+  "vera_projection_prng_key": 0,
+  "vera_dropout": 0.0,
+  "vera_d_initial": 0.1,
+  "adapter_act": "gelu",
+  "adapter_length": 128,
+  "use_galore": false,
+  "galore_target_modules": null,
+  "galore_rank": 128,
+  "galore_update_proj_gap": 50,
+  "galore_scale": 1.0,
+  "galore_proj_type": "std",
+  "galore_optim_per_parameter": false,
+  "galore_with_embedding": false,
+  "galore_quantization": false,
+  "galore_proj_quant": false,
+  "galore_proj_bits": 4,
+  "galore_proj_group_size": 256,
+  "galore_cos_threshold": 0.4,
+  "galore_gamma_proj": 2,
+  "galore_queue_size": 5,
+  "adalora_target_r": 8,
+  "adalora_init_r": 12,
+  "adalora_tinit": 0,
+  "adalora_tfinal": 0,
+  "adalora_deltaT": 1,
+  "adalora_beta1": 0.85,
+  "adalora_beta2": 0.85,
+  "adalora_orth_reg_weight": 0.5,
+  "llamapro_num_new_blocks": 4,
+  "llamapro_num_groups": null,
+  "lisa_activated_layers": 0,
+  "lisa_step_interval": 20,
+  "reft_layer_key": null,
+  "reft_layers": null,
+  "reft_rank": 4,
+  "reft_intervention_type": "LoreftIntervention",
+  "reft_args": null,
+  "swanlab_token": null,
+  "swanlab_project": null,
+  "swanlab_workspace": null,
+  "swanlab_exp_name": null,
+  "swanlab_mode": "cloud",
+  "add_version": true,
+  "resume_only_model": false,
+  "create_checkpoint_symlink": false,
+  "packing": false,
+  "lazy_tokenize": false,
+  "loss_type": null,
+  "optimizer": null,
+  "metric": null,
+  "zero_hpz_partition_size": null,
+  "rank": 0,
+  "global_world_size": 4,
+  "local_world_size": 4,
+  "model_suffix": "Qwen2.5-Math-7B",
+  "model_info": "ModelInfo(model_type='qwen2_5_math', model_dir='/data/yubo/models/Qwen2.5-Math-7B', torch_dtype=torch.bfloat16, max_model_len=4096, quant_method=None, quant_bits=None, rope_scaling=None, config=None, task_type='causal_lm', num_labels=None)",
+  "model_meta": "ModelMeta(model_type='qwen2_5_math', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-Math-1.5B-Instruct', hf_model_id='Qwen/Qwen2.5-Math-1.5B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Math-7B-Instruct', hf_model_id='Qwen/Qwen2.5-Math-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Math-72B-Instruct', hf_model_id='Qwen/Qwen2.5-Math-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Math-1.5B', hf_model_id='Qwen/Qwen2.5-Math-1.5B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Math-7B', hf_model_id='Qwen/Qwen2.5-Math-7B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Math-72B', hf_model_id='Qwen/Qwen2.5-Math-72B', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=['math'])], template='qwen2_5_math', get_function=<function get_model_tokenizer_with_flash_attn at 0x756621615fc0>, model_arch='llama', architectures=['Qwen2ForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.37'], tags=['math'])",
+  "model_dir": "/data/yubo/models/Qwen2.5-Math-7B",
+  "hub": "<class 'swift.hub.hub.HFHub'>",
+  "evaluation_strategy": "steps",
+  "training_args": "Seq2SeqTrainingArguments(output_dir='/data/yubo/CriticCoT/ms-swift/output_models_dsr_0514_p0/v0-20250515-025537', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=128, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=5e-06, weight_decay=0.05, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=40.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.2, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/data/yubo/CriticCoT/ms-swift/output_models_dsr_0514_p0/v0-20250515-025537/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=1, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=2, save_total_limit=None, save_safetensors=True, save_on_each_node=False, save_only_model=True, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend='nccl', tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=2, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/data/yubo/CriticCoT/ms-swift/output_models_dsr_0514_p0/v0-20250515-025537', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, tp_size=0, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['wandb'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='full', optimizer=None, local_repo_path=None, galore_config=None)"
+}
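args.json records a full-parameter SFT run of Qwen2.5-Math-7B with ms-swift under DeepSpeed ZeRO-3 across 4 GPUs. The global optimizer batch size follows from three of the fields above; a worked check:

```python
# Worked check of the global batch size implied by args.json.
per_device_train_batch_size = 1
gradient_accumulation_steps = 128
global_world_size = 4

global_batch = (per_device_train_batch_size
                * gradient_accumulation_steps
                * global_world_size)
print(global_batch)  # 512 sequences per optimizer step
```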
config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "hidden_act": "silu",
+  "hidden_size": 3584,
+  "initializer_range": 0.02,
+  "intermediate_size": 18944,
+  "max_position_embeddings": 4096,
+  "max_window_layers": 28,
+  "model_type": "qwen2",
+  "num_attention_heads": 28,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 4,
+  "pad_token_id": 151643,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000,
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.51.3",
+  "use_cache": false,
+  "use_mrope": false,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "max_new_tokens": 2048,
+  "transformers_version": "4.51.3"
+}
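config.json and generation_config.json describe a standard 28-layer Qwen2 causal LM stored in bfloat16. A minimal loading-and-generation sketch; "path/to/checkpoint" is a placeholder for this repo or a local clone:

```python
# Sketch: load the sharded checkpoint and generate. "path/to/checkpoint" is a
# placeholder, not a path from this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkpoint")
model = AutoModelForCausalLM.from_pretrained(
    "path/to/checkpoint",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
)
inputs = tok("Compute 12 * 34.", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=128)  # default cap is 2048 per generation_config.json
print(tok.decode(out[0], skip_special_tokens=True))
```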
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b63dd0b884d611eba911a76c92129f4b03e209a22c52a31d8c56ada6bda59034
+size 4877660776

model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36eadf239a3dfd4b9ec3b82f00b0aad409b8d86cc620135130e008e67e19a5d8
+size 4932751008

model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c24e6194b3afd9bdbf98d4fbca7db1577b99fd9d359a6b79f1a3ffa4e0aa3023
+size 4330865200

model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25f32fbb20b99416216de6fcc02bc0fda89de3fde5f2d7de4f483975583016bf
+size 1089994880
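The four LFS pointers give the on-disk shard sizes, which should reconcile with the total_size recorded in the index below; a quick arithmetic check:

```python
# Sanity check: shard bytes vs. the index's "total_size" (15231233024).
shards = [4877660776, 4932751008, 4330865200, 1089994880]
print(sum(shards))                # 15231271864 bytes on disk
print(sum(shards) - 15231233024)  # 38840 bytes, the per-shard safetensors headers
print(15231233024 // 2)           # 7615616512 bf16 parameters (~7.6B)
```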
model.safetensors.index.json
ADDED
|
@@ -0,0 +1,346 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"metadata": {
|
| 3 |
+
"total_size": 15231233024
|
| 4 |
+
},
|
| 5 |
+
"weight_map": {
|
| 6 |
+
"lm_head.weight": "model-00004-of-00004.safetensors",
|
| 7 |
+
"model.embed_tokens.weight": "model-00001-of-00004.safetensors",
|
| 8 |
+
"model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 9 |
+
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 10 |
+
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 11 |
+
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 12 |
+
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 13 |
+
"model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 14 |
+
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 15 |
+
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 16 |
+
"model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 17 |
+
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 18 |
+
"model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 19 |
+
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 20 |
+
"model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 21 |
+
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 22 |
+
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 23 |
+
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 24 |
+
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 25 |
+
"model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 26 |
+
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 27 |
+
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 28 |
+
"model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 29 |
+
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 30 |
+
"model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 31 |
+
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 32 |
+
"model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 33 |
+
"model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 34 |
+
"model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 35 |
+
"model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 36 |
+
"model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 37 |
+
"model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 38 |
+
"model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 39 |
+
"model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 40 |
+
"model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 41 |
+
"model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 42 |
+
"model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 43 |
+
"model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 44 |
+
"model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 45 |
+
"model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 46 |
+
"model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 47 |
+
"model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 48 |
+
"model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 49 |
+
"model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 50 |
+
"model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 51 |
+
"model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 52 |
+
"model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 53 |
+
"model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 54 |
+
"model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 55 |
+
"model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 56 |
+
"model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 57 |
+
"model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 58 |
+
"model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 59 |
+
"model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 60 |
+
"model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 61 |
+
"model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 62 |
+
"model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 63 |
+
"model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 64 |
+
"model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 65 |
+
"model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 66 |
+
"model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 67 |
+
"model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 68 |
+
"model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 69 |
+
"model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 70 |
+
"model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 71 |
+
"model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 72 |
+
"model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 73 |
+
"model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 74 |
+
"model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 75 |
+
"model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 76 |
+
"model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 77 |
+
"model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 78 |
+
"model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 79 |
+
"model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 80 |
+
"model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 81 |
+
"model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 82 |
+
"model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 83 |
+
"model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 84 |
+
"model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 85 |
+
"model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 86 |
+
"model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 87 |
+
"model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 88 |
+
"model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 89 |
+
"model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 90 |
+
"model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 91 |
+
"model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 92 |
+
"model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 93 |
+
"model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 94 |
+
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 95 |
+
"model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 96 |
+
"model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 97 |
+
"model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 98 |
+
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 99 |
+
"model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 100 |
+
"model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 101 |
+
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 102 |
+
"model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 103 |
+
"model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 104 |
+
"model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 105 |
+
"model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 106 |
+
"model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 107 |
+
"model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 108 |
+
"model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 109 |
+
"model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 110 |
+
"model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 111 |
+
"model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 112 |
+
"model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 113 |
+
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 114 |
+
"model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 115 |
+
"model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 116 |
+
"model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 117 |
+
"model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 118 |
+
"model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 119 |
+
"model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 120 |
+
"model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 121 |
+
"model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 122 |
+
"model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 123 |
+
"model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 124 |
+
"model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 125 |
+
"model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 126 |
+
"model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 127 |
+
"model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 128 |
+
"model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 129 |
+
"model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 130 |
+
"model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 131 |
+
"model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 132 |
+
"model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 133 |
+
"model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 134 |
+
"model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 135 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 136 |
+
"model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 137 |
+
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 138 |
+
"model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 139 |
+
"model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 140 |
+
"model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 141 |
+
"model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 142 |
+
"model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 143 |
+
"model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 144 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 145 |
+
"model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 146 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 147 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 148 |
+
"model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 149 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 150 |
+
"model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 151 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 152 |
+
"model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 153 |
+
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 154 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 155 |
+
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 156 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 157 |
+
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 158 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 159 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 160 |
+
"model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 161 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 162 |
+
"model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 163 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 164 |
+
"model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 165 |
+
"model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 166 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 167 |
+
"model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 168 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 169 |
+
"model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 170 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 171 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 172 |
+
"model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 173 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 174 |
+
"model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 175 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 176 |
+
"model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 177 |
+
"model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 178 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 179 |
+
"model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 180 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 181 |
+
"model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 182 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 183 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 184 |
+
"model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 185 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 186 |
+
"model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 187 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 188 |
+
"model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 189 |
+
"model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 190 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 191 |
+
"model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 192 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 193 |
+
"model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 194 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 195 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 196 |
+
"model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 197 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 198 |
+
"model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 199 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 200 |
+
"model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 201 |
+
"model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 202 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 203 |
+
"model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 204 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 205 |
+
"model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 206 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 207 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 208 |
+
"model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 209 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 210 |
+
"model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 211 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 212 |
+
"model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 213 |
+
"model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 214 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 215 |
+
"model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 216 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 217 |
+
"model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 218 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 219 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 220 |
+
"model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 221 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 222 |
+
"model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 223 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 224 |
+
"model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 225 |
+
"model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 226 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 227 |
+
"model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 228 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 229 |
+
"model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 230 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 231 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 232 |
+
"model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 233 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 234 |
+
"model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 235 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 236 |
+
"model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 237 |
+
"model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 238 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 239 |
+
"model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 240 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 241 |
+
"model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 242 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 243 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 244 |
+
"model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 245 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 246 |
+
"model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 247 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 248 |
+
"model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 249 |
+
"model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 250 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 251 |
+
"model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 252 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 253 |
+
"model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 254 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 255 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 256 |
+
"model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 257 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 258 |
+
"model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 259 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 260 |
+
"model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 261 |
+
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 262 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 263 |
+
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 264 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 265 |
+
"model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 266 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 267 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 268 |
+
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 269 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 270 |
+
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 271 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 272 |
+
"model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 273 |
+
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 274 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 275 |
+
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 276 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 277 |
+    "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.norm.weight": "model-00003-of-00004.safetensors"
+  }
+}
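The `weight_map` above is how loaders resolve each tensor name to the shard file that contains it; note that layer 8 is split across a shard boundary (its attention projections end model-00001 while its MLP and norms open model-00002), which is normal for size-based sharding. Below is a minimal illustrative sketch, using the real `safetensors` library but not the actual `transformers` loading path, of resolving and reading a single tensor; it assumes the shards sit in the current directory.

```python
# Illustrative sketch: resolve one tensor via the index's weight_map and read
# it from the right shard without loading the whole checkpoint.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.8.self_attn.k_proj.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00004.safetensors"

with safe_open(shard, framework="pt") as shard_file:
    tensor = shard_file.get_tensor(name)  # reads only this tensor from the shard

print(name, tuple(tensor.shape), "->", shard)
```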
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
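This map points both `eos_token` and `pad_token` at `<|endoftext|>` and registers thirteen additional special tokens. A hedged sketch of how that surfaces through `transformers` (here `"."` stands in for a local clone of this repository):

```python
# Quick check of the special-token mapping above via AutoTokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
assert tok.eos_token == tok.pad_token == "<|endoftext|>"
print(tok.convert_tokens_to_ids("<|im_start|>"))  # 151644, per added_tokens.json
```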
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+size 11421896
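Because tokenizer.json (about 11.4 MB) is tracked through Git LFS, per the .gitattributes rule added in this commit, the diff records only the three-line pointer: spec version, sha256 oid, and byte size. A tiny illustrative parser, not part of any library API:

```python
# Illustrative only: a Git LFS pointer is three "key value" lines; parsing it
# recovers the blob's content hash and size without downloading the blob.
def parse_lfs_pointer(text: str) -> dict:
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa\n"
    "size 11421896\n"
)
info = parse_lfs_pointer(pointer)
print(info["oid"], info["size"])  # sha256:9c5a...  11421896
```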
tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151664": {
+      "content": "<|file_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'Please reason step by step, and put your final answer within \\\\boxed{}.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nPlease reason step by step, and put your final answer within \\\\boxed{}.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 131072,
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
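The `chat_template` above is a Jinja template that wraps messages in ChatML-style `<|im_start|>...<|im_end|>` markers, supports `<tool_call>` blocks, and injects a default system prompt ("Please reason step by step, and put your final answer within \boxed{}.") when no system message is given. A hedged sketch of rendering it with the standard `transformers` API (again assuming `"."` is a local clone of this checkpoint):

```python
# Render the ChatML-style chat template shipped in tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
messages = [{"role": "user", "content": "What is 2 + 2?"}]
prompt = tok.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,  # appends '<|im_start|>assistant\n'
)
print(prompt)  # starts with the default step-by-step system prompt
```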
trainer_state.json
ADDED
@@ -0,0 +1,334 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 29.78527607361963,
+  "eval_steps": 2,
+  "global_step": 30,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.7852760736196319,
+      "grad_norm": 16.91557013833436,
+      "learning_rate": 6.25e-07,
+      "loss": 0.6924901008605957,
+      "memory(GiB)": 59.48,
+      "step": 1,
+      "token_acc": 0.8249132429587075,
+      "train_speed(iter/s)": 0.004929
+    },
+    {
+      "epoch": 1.7852760736196318,
+      "grad_norm": 32.093819128778975,
+      "learning_rate": 1.25e-06,
+      "loss": 1.3804800510406494,
+      "memory(GiB)": 65.24,
+      "step": 2,
+      "token_acc": 0.8298509781294038,
+      "train_speed(iter/s)": 0.004777
+    },
+    {
+      "epoch": 2.785276073619632,
+      "grad_norm": 28.105492367287425,
+      "learning_rate": 1.8750000000000003e-06,
+      "loss": 1.368883490562439,
+      "memory(GiB)": 65.24,
+      "step": 3,
+      "token_acc": 0.8234861950468065,
+      "train_speed(iter/s)": 0.004504
+    },
+    {
+      "epoch": 3.785276073619632,
+      "grad_norm": 23.4948914824423,
+      "learning_rate": 2.5e-06,
+      "loss": 1.3871572017669678,
+      "memory(GiB)": 65.24,
+      "step": 4,
+      "token_acc": 0.8268231152830742,
+      "train_speed(iter/s)": 0.004539
+    },
+    {
+      "epoch": 4.785276073619632,
+      "grad_norm": 22.70640438340761,
+      "learning_rate": 3.125e-06,
+      "loss": 1.3857779502868652,
+      "memory(GiB)": 65.24,
+      "step": 5,
+      "token_acc": 0.8242320404393895,
+      "train_speed(iter/s)": 0.004446
+    },
+    {
+      "epoch": 5.785276073619632,
+      "grad_norm": 15.466734654904698,
+      "learning_rate": 3.7500000000000005e-06,
+      "loss": 1.2761751413345337,
+      "memory(GiB)": 65.24,
+      "step": 6,
+      "token_acc": 0.8388591983556012,
+      "train_speed(iter/s)": 0.004472
+    },
+    {
+      "epoch": 6.785276073619632,
+      "grad_norm": 16.008101850251936,
+      "learning_rate": 4.3750000000000005e-06,
+      "loss": 1.254455804824829,
+      "memory(GiB)": 65.24,
+      "step": 7,
+      "token_acc": 0.8327785368277233,
+      "train_speed(iter/s)": 0.004412
+    },
+    {
+      "epoch": 7.785276073619632,
+      "grad_norm": 14.437159936379576,
+      "learning_rate": 5e-06,
+      "loss": 1.2218172550201416,
+      "memory(GiB)": 65.24,
+      "step": 8,
+      "token_acc": 0.8436573882508192,
+      "train_speed(iter/s)": 0.004441
+    },
+    {
+      "epoch": 8.785276073619633,
+      "grad_norm": 11.827036693336053,
+      "learning_rate": 4.987961816680493e-06,
+      "loss": 1.0844422578811646,
+      "memory(GiB)": 65.24,
+      "step": 9,
+      "token_acc": 0.8532927916376647,
+      "train_speed(iter/s)": 0.004399
+    },
+    {
+      "epoch": 9.785276073619633,
+      "grad_norm": 8.677216453543807,
+      "learning_rate": 4.9519632010080765e-06,
+      "loss": 1.0037188529968262,
+      "memory(GiB)": 65.24,
+      "step": 10,
+      "token_acc": 0.8562772549919669,
+      "train_speed(iter/s)": 0.00442
+    },
+    {
+      "epoch": 10.785276073619633,
+      "grad_norm": 9.608357831993152,
+      "learning_rate": 4.8923508393305224e-06,
+      "loss": 0.8877495527267456,
+      "memory(GiB)": 65.24,
+      "step": 11,
+      "token_acc": 0.8673956568612352,
+      "train_speed(iter/s)": 0.004387
+    },
+    {
+      "epoch": 11.785276073619633,
+      "grad_norm": 5.865247304013574,
+      "learning_rate": 4.809698831278217e-06,
+      "loss": 0.8756588697433472,
+      "memory(GiB)": 65.24,
+      "step": 12,
+      "token_acc": 0.8741366742033928,
+      "train_speed(iter/s)": 0.004405
+    },
+    {
+      "epoch": 12.785276073619633,
+      "grad_norm": 8.181094202272746,
+      "learning_rate": 4.704803160870888e-06,
+      "loss": 0.8439725637435913,
+      "memory(GiB)": 65.24,
+      "step": 13,
+      "token_acc": 0.8747614672415012,
+      "train_speed(iter/s)": 0.00438
+    },
+    {
+      "epoch": 13.785276073619633,
+      "grad_norm": 3.0779863644676038,
+      "learning_rate": 4.578674030756364e-06,
+      "loss": 0.8301749229431152,
+      "memory(GiB)": 65.24,
+      "step": 14,
+      "token_acc": 0.8825180146971391,
+      "train_speed(iter/s)": 0.0044
+    },
+    {
+      "epoch": 14.785276073619633,
+      "grad_norm": 2.2519308642126976,
+      "learning_rate": 4.432526133406843e-06,
+      "loss": 0.7788468599319458,
+      "memory(GiB)": 65.24,
+      "step": 15,
+      "token_acc": 0.8834006700827749,
+      "train_speed(iter/s)": 0.004377
+    },
+    {
+      "epoch": 15.785276073619633,
+      "grad_norm": 2.472550949106497,
+      "learning_rate": 4.267766952966369e-06,
+      "loss": 0.7373002767562866,
+      "memory(GiB)": 65.24,
+      "step": 16,
+      "token_acc": 0.8850886438730918,
+      "train_speed(iter/s)": 0.004393
+    },
+    {
+      "epoch": 16.78527607361963,
+      "grad_norm": 2.167954580123407,
+      "learning_rate": 4.085983210409114e-06,
+      "loss": 0.7213550806045532,
+      "memory(GiB)": 65.24,
+      "step": 17,
+      "token_acc": 0.8929335420724931,
+      "train_speed(iter/s)": 0.004374
+    },
+    {
+      "epoch": 17.78527607361963,
+      "grad_norm": 7.407922053021935,
+      "learning_rate": 3.888925582549006e-06,
+      "loss": 0.6829527616500854,
+      "memory(GiB)": 65.24,
+      "step": 18,
+      "token_acc": 0.898971411837915,
+      "train_speed(iter/s)": 0.004387
+    },
+    {
+      "epoch": 18.78527607361963,
+      "grad_norm": 3.205996489689816,
+      "learning_rate": 3.6784918420649952e-06,
+      "loss": 0.6937679648399353,
+      "memory(GiB)": 65.24,
+      "step": 19,
+      "token_acc": 0.89389552274522,
+      "train_speed(iter/s)": 0.004367
+    },
+    {
+      "epoch": 19.78527607361963,
+      "grad_norm": 1.8062200639280714,
+      "learning_rate": 3.4567085809127247e-06,
+      "loss": 0.6299235224723816,
+      "memory(GiB)": 65.24,
+      "step": 20,
+      "token_acc": 0.8986795675864391,
+      "train_speed(iter/s)": 0.004381
+    },
+    {
+      "epoch": 20.78527607361963,
+      "grad_norm": 1.8367029186994497,
+      "learning_rate": 3.225711693136156e-06,
+      "loss": 0.6152043342590332,
+      "memory(GiB)": 65.24,
+      "step": 21,
+      "token_acc": 0.8981181532678849,
+      "train_speed(iter/s)": 0.004366
+    },
+    {
+      "epoch": 21.78527607361963,
+      "grad_norm": 1.6858306482437981,
+      "learning_rate": 2.9877258050403214e-06,
+      "loss": 0.5852848887443542,
+      "memory(GiB)": 65.24,
+      "step": 22,
+      "token_acc": 0.9046662374852932,
+      "train_speed(iter/s)": 0.004377
+    },
+    {
+      "epoch": 22.78527607361963,
+      "grad_norm": 1.5396750222326772,
+      "learning_rate": 2.7450428508239024e-06,
+      "loss": 0.5857734680175781,
+      "memory(GiB)": 65.24,
+      "step": 23,
+      "token_acc": 0.9069728121195726,
+      "train_speed(iter/s)": 0.004362
+    },
+    {
+      "epoch": 23.78527607361963,
+      "grad_norm": 2.726801161598347,
+      "learning_rate": 2.5e-06,
+      "loss": 0.594467043876648,
+      "memory(GiB)": 65.24,
+      "step": 24,
+      "token_acc": 0.9061053767793295,
+      "train_speed(iter/s)": 0.004374
+    },
+    {
+      "epoch": 24.78527607361963,
+      "grad_norm": 1.3473993763783847,
+      "learning_rate": 2.2549571491760985e-06,
+      "loss": 0.5740076303482056,
+      "memory(GiB)": 65.24,
+      "step": 25,
+      "token_acc": 0.9047997112955611,
+      "train_speed(iter/s)": 0.004361
+    },
+    {
+      "epoch": 25.78527607361963,
+      "grad_norm": 1.4244563323397152,
+      "learning_rate": 2.01227419495968e-06,
+      "loss": 0.5681454539299011,
+      "memory(GiB)": 65.24,
+      "step": 26,
+      "token_acc": 0.9087721742360918,
+      "train_speed(iter/s)": 0.00437
+    },
+    {
+      "epoch": 26.78527607361963,
+      "grad_norm": 1.239553560433712,
+      "learning_rate": 1.7742883068638447e-06,
+      "loss": 0.5257890224456787,
+      "memory(GiB)": 65.24,
+      "step": 27,
+      "token_acc": 0.9064646632419414,
+      "train_speed(iter/s)": 0.00436
+    },
+    {
+      "epoch": 27.78527607361963,
+      "grad_norm": 1.1788437383399384,
+      "learning_rate": 1.5432914190872757e-06,
+      "loss": 0.5807796716690063,
+      "memory(GiB)": 65.24,
+      "step": 28,
+      "token_acc": 0.9147543418525945,
+      "train_speed(iter/s)": 0.004368
+    },
+    {
+      "epoch": 28.78527607361963,
+      "grad_norm": 1.1910633482768642,
+      "learning_rate": 1.3215081579350058e-06,
+      "loss": 0.5537985563278198,
+      "memory(GiB)": 65.24,
+      "step": 29,
+      "token_acc": 0.9150303118131536,
+      "train_speed(iter/s)": 0.004357
+    },
+    {
+      "epoch": 29.78527607361963,
+      "grad_norm": 1.2237195172674145,
+      "learning_rate": 1.1110744174509952e-06,
+      "loss": 0.5225300788879395,
+      "memory(GiB)": 65.24,
+      "step": 30,
+      "token_acc": 0.9138848454453157,
+      "train_speed(iter/s)": 0.004366
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 40,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 40,
+  "save_steps": 2,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 47704180129792.0,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
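This checkpoint was saved at global step 30 of max_steps 40; across the logged steps, training loss falls from about 1.38 to 0.52 while token accuracy climbs from about 0.82 to 0.91. A minimal sketch of pulling that trajectory out of `log_history`, which is plain JSON:

```python
# Minimal sketch: print the loss/accuracy trajectory from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"step {entry['step']:>2}  lr {entry['learning_rate']:.2e}  "
          f"loss {entry['loss']:.4f}  token_acc {entry['token_acc']:.4f}")
```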
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82e566748a3c11a926e768f02646ab898e1fbf29196ffc5a2673392b765950c8
+size 8312
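training_args.bin is another LFS pointer; the underlying file is a pickled `TrainingArguments` object saved by the `transformers` Trainer. A hedged sketch of inspecting it; unpickling needs `transformers` importable, and on recent PyTorch (2.6+) `weights_only` must be disabled because this is an arbitrary pickle, not a tensor file:

```python
# Hedged sketch: inspect the pickled TrainingArguments.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs)             # 40, matching trainer_state.json
print(args.per_device_train_batch_size)  # 1, matching train_batch_size above
```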
vocab.json
ADDED
The diff for this file is too large to render; see the raw file in the repository.