{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 24,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 5.126630481226491,
      "learning_rate": 1.6666666666666665e-07,
      "logits/chosen": -2.8051414489746094,
      "logits/rejected": -2.824976921081543,
      "logps/chosen": -140.9189453125,
      "logps/rejected": -111.96463775634766,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.21,
      "grad_norm": 4.3434825310394585,
      "learning_rate": 4.888932014465352e-07,
      "logits/chosen": -2.7855749130249023,
      "logits/rejected": -2.789283275604248,
      "logps/chosen": -91.09937286376953,
      "logps/rejected": -97.13131713867188,
      "loss": 0.6931,
      "rewards/accuracies": 0.3828125,
      "rewards/chosen": 0.0011320528574287891,
      "rewards/margins": 0.0004516332410275936,
      "rewards/rejected": 0.0006804197328165174,
      "step": 5
    },
    {
      "epoch": 0.42,
      "grad_norm": 4.884271965568999,
      "learning_rate": 3.75e-07,
      "logits/chosen": -2.8252010345458984,
      "logits/rejected": -2.8388328552246094,
      "logps/chosen": -114.865234375,
      "logps/rejected": -117.52699279785156,
      "loss": 0.6917,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": 0.021610241383314133,
      "rewards/margins": 0.003150947391986847,
      "rewards/rejected": 0.018459293991327286,
      "step": 10
    },
    {
      "epoch": 0.62,
      "grad_norm": 4.267689170274191,
      "learning_rate": 1.9436976651092142e-07,
      "logits/chosen": -2.7744898796081543,
      "logits/rejected": -2.7769482135772705,
      "logps/chosen": -99.48928833007812,
      "logps/rejected": -101.80488586425781,
      "loss": 0.69,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": 0.04334767535328865,
      "rewards/margins": 0.005900852382183075,
      "rewards/rejected": 0.03744681924581528,
      "step": 15
    },
    {
      "epoch": 0.83,
      "grad_norm": 4.8670057291088185,
      "learning_rate": 4.3440306421001324e-08,
      "logits/chosen": -2.803718328475952,
      "logits/rejected": -2.8046982288360596,
      "logps/chosen": -84.83966827392578,
      "logps/rejected": -93.15818786621094,
      "loss": 0.6868,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.05009683966636658,
      "rewards/margins": 0.012487905099987984,
      "rewards/rejected": 0.037608928978443146,
      "step": 20
    },
    {
      "epoch": 1.0,
      "step": 24,
      "total_flos": 0.0,
      "train_loss": 0.6898899773756663,
      "train_runtime": 644.9682,
      "train_samples_per_second": 4.738,
      "train_steps_per_second": 0.037
    }
  ],
  "logging_steps": 5,
  "max_steps": 24,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}