| { | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 1.0, | |
| "eval_steps": 100, | |
| "global_step": 1556, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.0, | |
| "learning_rate": 1.0706638115631692e-10, | |
| "logits/chosen": 3.009334087371826, | |
| "logits/rejected": 2.8860254287719727, | |
| "logps/chosen": -363.7457580566406, | |
| "logps/rejected": -405.4437255859375, | |
| "loss": 0.6931, | |
| "rewards/accuracies": 0.0, | |
| "rewards/chosen": 0.0, | |
| "rewards/margins": 0.0, | |
| "rewards/rejected": 0.0, | |
| "step": 1 | |
| }, | |
| { | |
| "epoch": 0.01, | |
| "learning_rate": 1.070663811563169e-09, | |
| "logits/chosen": 2.383711338043213, | |
| "logits/rejected": 3.0388379096984863, | |
| "logps/chosen": -456.6111145019531, | |
| "logps/rejected": -428.7125244140625, | |
| "loss": 2.7789, | |
| "rewards/accuracies": 0.4305555522441864, | |
| "rewards/chosen": -0.10625362396240234, | |
| "rewards/margins": -0.11497735977172852, | |
| "rewards/rejected": 0.008723735809326172, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.01, | |
| "learning_rate": 2.141327623126338e-09, | |
| "logits/chosen": 2.6908938884735107, | |
| "logits/rejected": 3.1237950325012207, | |
| "logps/chosen": -387.3503112792969, | |
| "logps/rejected": -384.4349060058594, | |
| "loss": 2.7716, | |
| "rewards/accuracies": 0.4625000059604645, | |
| "rewards/chosen": 0.43295717239379883, | |
| "rewards/margins": 1.0383365154266357, | |
| "rewards/rejected": -0.6053793430328369, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.02, | |
| "learning_rate": 3.2119914346895075e-09, | |
| "logits/chosen": 2.479438543319702, | |
| "logits/rejected": 2.9864134788513184, | |
| "logps/chosen": -409.71563720703125, | |
| "logps/rejected": -410.36260986328125, | |
| "loss": 3.0305, | |
| "rewards/accuracies": 0.574999988079071, | |
| "rewards/chosen": 0.3105888366699219, | |
| "rewards/margins": 0.5438008308410645, | |
| "rewards/rejected": -0.23321199417114258, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.03, | |
| "learning_rate": 4.282655246252676e-09, | |
| "logits/chosen": 2.513826847076416, | |
| "logits/rejected": 3.7642135620117188, | |
| "logps/chosen": -412.05780029296875, | |
| "logps/rejected": -408.3034973144531, | |
| "loss": 2.3915, | |
| "rewards/accuracies": 0.512499988079071, | |
| "rewards/chosen": -0.4330732822418213, | |
| "rewards/margins": 0.3383500576019287, | |
| "rewards/rejected": -0.77142333984375, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.03, | |
| "learning_rate": 5.353319057815846e-09, | |
| "logits/chosen": 2.6957950592041016, | |
| "logits/rejected": 3.350551128387451, | |
| "logps/chosen": -428.7166442871094, | |
| "logps/rejected": -425.9712829589844, | |
| "loss": 2.6906, | |
| "rewards/accuracies": 0.5375000238418579, | |
| "rewards/chosen": 0.6460269093513489, | |
| "rewards/margins": 0.9008991122245789, | |
| "rewards/rejected": -0.25487232208251953, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.04, | |
| "learning_rate": 6.423982869379015e-09, | |
| "logits/chosen": 1.877236008644104, | |
| "logits/rejected": 3.345506191253662, | |
| "logps/chosen": -423.69549560546875, | |
| "logps/rejected": -409.8067321777344, | |
| "loss": 1.8376, | |
| "rewards/accuracies": 0.6875, | |
| "rewards/chosen": 2.4741673469543457, | |
| "rewards/margins": 4.329440116882324, | |
| "rewards/rejected": -1.8552722930908203, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.04, | |
| "learning_rate": 7.494646680942184e-09, | |
| "logits/chosen": 2.3659510612487793, | |
| "logits/rejected": 3.347115993499756, | |
| "logps/chosen": -419.614990234375, | |
| "logps/rejected": -404.15386962890625, | |
| "loss": 1.8117, | |
| "rewards/accuracies": 0.637499988079071, | |
| "rewards/chosen": 1.2335608005523682, | |
| "rewards/margins": 1.696711778640747, | |
| "rewards/rejected": -0.4631509780883789, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.05, | |
| "learning_rate": 8.565310492505352e-09, | |
| "logits/chosen": 1.8567355871200562, | |
| "logits/rejected": 3.529881238937378, | |
| "logps/chosen": -454.572998046875, | |
| "logps/rejected": -433.12152099609375, | |
| "loss": 1.4964, | |
| "rewards/accuracies": 0.737500011920929, | |
| "rewards/chosen": 1.8937450647354126, | |
| "rewards/margins": 4.073466777801514, | |
| "rewards/rejected": -2.1797218322753906, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.06, | |
| "learning_rate": 9.635974304068522e-09, | |
| "logits/chosen": 2.379859447479248, | |
| "logits/rejected": 3.260948657989502, | |
| "logps/chosen": -451.9266052246094, | |
| "logps/rejected": -383.00042724609375, | |
| "loss": 1.6543, | |
| "rewards/accuracies": 0.6625000238418579, | |
| "rewards/chosen": 2.33457612991333, | |
| "rewards/margins": 3.6442272663116455, | |
| "rewards/rejected": -1.3096508979797363, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.06, | |
| "learning_rate": 1.0706638115631692e-08, | |
| "logits/chosen": 2.4745795726776123, | |
| "logits/rejected": 3.668903350830078, | |
| "logps/chosen": -402.97393798828125, | |
| "logps/rejected": -378.31890869140625, | |
| "loss": 1.5408, | |
| "rewards/accuracies": 0.675000011920929, | |
| "rewards/chosen": 0.7632139921188354, | |
| "rewards/margins": 2.8585281372070312, | |
| "rewards/rejected": -2.0953145027160645, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.06, | |
| "eval_logits/chosen": 1.7967612743377686, | |
| "eval_logits/rejected": 2.483816146850586, | |
| "eval_logps/chosen": -403.54827880859375, | |
| "eval_logps/rejected": -402.92413330078125, | |
| "eval_loss": 1.0591531991958618, | |
| "eval_rewards/accuracies": 0.8125, | |
| "eval_rewards/chosen": 2.333465814590454, | |
| "eval_rewards/margins": 5.841272830963135, | |
| "eval_rewards/rejected": -3.5078072547912598, | |
| "eval_runtime": 76.9287, | |
| "eval_samples_per_second": 12.999, | |
| "eval_steps_per_second": 0.416, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.07, | |
| "learning_rate": 1.177730192719486e-08, | |
| "logits/chosen": 2.7680611610412598, | |
| "logits/rejected": 3.0494232177734375, | |
| "logps/chosen": -384.3839111328125, | |
| "logps/rejected": -407.72918701171875, | |
| "loss": 1.0018, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 2.2038276195526123, | |
| "rewards/margins": 6.380339622497559, | |
| "rewards/rejected": -4.176511764526367, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.08, | |
| "learning_rate": 1.284796573875803e-08, | |
| "logits/chosen": 2.498812198638916, | |
| "logits/rejected": 3.4484715461730957, | |
| "logps/chosen": -401.28857421875, | |
| "logps/rejected": -426.8399963378906, | |
| "loss": 0.7845, | |
| "rewards/accuracies": 0.8374999761581421, | |
| "rewards/chosen": 2.2210142612457275, | |
| "rewards/margins": 8.185151100158691, | |
| "rewards/rejected": -5.964136123657227, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.08, | |
| "learning_rate": 1.3918629550321198e-08, | |
| "logits/chosen": 2.083467483520508, | |
| "logits/rejected": 2.998776435852051, | |
| "logps/chosen": -437.83709716796875, | |
| "logps/rejected": -462.96026611328125, | |
| "loss": 0.9019, | |
| "rewards/accuracies": 0.9125000238418579, | |
| "rewards/chosen": 4.379509449005127, | |
| "rewards/margins": 10.292404174804688, | |
| "rewards/rejected": -5.912895202636719, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.09, | |
| "learning_rate": 1.4989293361884368e-08, | |
| "logits/chosen": 2.5522398948669434, | |
| "logits/rejected": 3.3716235160827637, | |
| "logps/chosen": -422.5474548339844, | |
| "logps/rejected": -406.94183349609375, | |
| "loss": 0.8578, | |
| "rewards/accuracies": 0.8374999761581421, | |
| "rewards/chosen": 2.729921579360962, | |
| "rewards/margins": 7.9893903732299805, | |
| "rewards/rejected": -5.259469509124756, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.1, | |
| "learning_rate": 1.6059957173447535e-08, | |
| "logits/chosen": 2.7751362323760986, | |
| "logits/rejected": 2.9003946781158447, | |
| "logps/chosen": -391.8503723144531, | |
| "logps/rejected": -400.2032470703125, | |
| "loss": 0.7248, | |
| "rewards/accuracies": 0.800000011920929, | |
| "rewards/chosen": 1.3115911483764648, | |
| "rewards/margins": 8.482717514038086, | |
| "rewards/rejected": -7.171125888824463, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.1, | |
| "learning_rate": 1.7130620985010704e-08, | |
| "logits/chosen": 2.3851382732391357, | |
| "logits/rejected": 3.175081968307495, | |
| "logps/chosen": -482.95062255859375, | |
| "logps/rejected": -422.1293029785156, | |
| "loss": 0.6889, | |
| "rewards/accuracies": 0.8999999761581421, | |
| "rewards/chosen": 3.3662681579589844, | |
| "rewards/margins": 10.24647331237793, | |
| "rewards/rejected": -6.8802056312561035, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.11, | |
| "learning_rate": 1.8201284796573874e-08, | |
| "logits/chosen": 2.398458957672119, | |
| "logits/rejected": 3.4337353706359863, | |
| "logps/chosen": -422.200927734375, | |
| "logps/rejected": -402.37274169921875, | |
| "loss": 0.8347, | |
| "rewards/accuracies": 0.800000011920929, | |
| "rewards/chosen": 0.8539754748344421, | |
| "rewards/margins": 8.811124801635742, | |
| "rewards/rejected": -7.957149505615234, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.12, | |
| "learning_rate": 1.9271948608137044e-08, | |
| "logits/chosen": 2.4511101245880127, | |
| "logits/rejected": 3.5071682929992676, | |
| "logps/chosen": -444.2669372558594, | |
| "logps/rejected": -460.7818908691406, | |
| "loss": 0.7348, | |
| "rewards/accuracies": 0.862500011920929, | |
| "rewards/chosen": 0.6959193348884583, | |
| "rewards/margins": 10.471872329711914, | |
| "rewards/rejected": -9.77595329284668, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.12, | |
| "learning_rate": 2.0342612419700214e-08, | |
| "logits/chosen": 2.3941550254821777, | |
| "logits/rejected": 3.183414936065674, | |
| "logps/chosen": -406.2020263671875, | |
| "logps/rejected": -389.969970703125, | |
| "loss": 0.6852, | |
| "rewards/accuracies": 0.800000011920929, | |
| "rewards/chosen": 0.565895140171051, | |
| "rewards/margins": 9.187482833862305, | |
| "rewards/rejected": -8.621587753295898, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.13, | |
| "learning_rate": 2.1413276231263384e-08, | |
| "logits/chosen": 2.699617624282837, | |
| "logits/rejected": 3.2629177570343018, | |
| "logps/chosen": -370.5373840332031, | |
| "logps/rejected": -373.650634765625, | |
| "loss": 0.6805, | |
| "rewards/accuracies": 0.824999988079071, | |
| "rewards/chosen": 0.4384472370147705, | |
| "rewards/margins": 9.281099319458008, | |
| "rewards/rejected": -8.8426513671875, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.13, | |
| "eval_logits/chosen": 1.8075158596038818, | |
| "eval_logits/rejected": 2.4975709915161133, | |
| "eval_logps/chosen": -403.94219970703125, | |
| "eval_logps/rejected": -404.73687744140625, | |
| "eval_loss": 0.7941137552261353, | |
| "eval_rewards/accuracies": 0.9140625, | |
| "eval_rewards/chosen": 0.36384421586990356, | |
| "eval_rewards/margins": 12.935296058654785, | |
| "eval_rewards/rejected": -12.571451187133789, | |
| "eval_runtime": 76.7386, | |
| "eval_samples_per_second": 13.031, | |
| "eval_steps_per_second": 0.417, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.13, | |
| "learning_rate": 2.248394004282655e-08, | |
| "logits/chosen": 2.466681718826294, | |
| "logits/rejected": 3.0487618446350098, | |
| "logps/chosen": -413.58941650390625, | |
| "logps/rejected": -422.9561462402344, | |
| "loss": 0.5453, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": 1.9145727157592773, | |
| "rewards/margins": 14.975469589233398, | |
| "rewards/rejected": -13.06089973449707, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.14, | |
| "learning_rate": 2.355460385438972e-08, | |
| "logits/chosen": 2.4473984241485596, | |
| "logits/rejected": 3.8849105834960938, | |
| "logps/chosen": -435.60198974609375, | |
| "logps/rejected": -394.08990478515625, | |
| "loss": 0.4307, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": 1.1083576679229736, | |
| "rewards/margins": 12.617959976196289, | |
| "rewards/rejected": -11.509601593017578, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.15, | |
| "learning_rate": 2.462526766595289e-08, | |
| "logits/chosen": 3.0809953212738037, | |
| "logits/rejected": 3.275381088256836, | |
| "logps/chosen": -387.32427978515625, | |
| "logps/rejected": -418.32745361328125, | |
| "loss": 0.4707, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": 2.789315700531006, | |
| "rewards/margins": 15.172778129577637, | |
| "rewards/rejected": -12.383462905883789, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.15, | |
| "learning_rate": 2.569593147751606e-08, | |
| "logits/chosen": 2.8269176483154297, | |
| "logits/rejected": 3.720735549926758, | |
| "logps/chosen": -400.30743408203125, | |
| "logps/rejected": -414.60430908203125, | |
| "loss": 0.4476, | |
| "rewards/accuracies": 0.9125000238418579, | |
| "rewards/chosen": 1.7549598217010498, | |
| "rewards/margins": 15.389744758605957, | |
| "rewards/rejected": -13.634783744812012, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "learning_rate": 2.676659528907923e-08, | |
| "logits/chosen": 2.572218894958496, | |
| "logits/rejected": 2.84545636177063, | |
| "logps/chosen": -398.96356201171875, | |
| "logps/rejected": -404.20635986328125, | |
| "loss": 0.3304, | |
| "rewards/accuracies": 0.8999999761581421, | |
| "rewards/chosen": 1.083133578300476, | |
| "rewards/margins": 14.877154350280762, | |
| "rewards/rejected": -13.79401969909668, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.17, | |
| "learning_rate": 2.7837259100642396e-08, | |
| "logits/chosen": 2.4352571964263916, | |
| "logits/rejected": 3.324097156524658, | |
| "logps/chosen": -389.4182434082031, | |
| "logps/rejected": -390.0982666015625, | |
| "loss": 0.363, | |
| "rewards/accuracies": 0.8999999761581421, | |
| "rewards/chosen": 0.10746519267559052, | |
| "rewards/margins": 14.993916511535645, | |
| "rewards/rejected": -14.886451721191406, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.17, | |
| "learning_rate": 2.890792291220557e-08, | |
| "logits/chosen": 2.1370952129364014, | |
| "logits/rejected": 3.407914400100708, | |
| "logps/chosen": -434.8224182128906, | |
| "logps/rejected": -440.6358947753906, | |
| "loss": 0.4309, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": 2.220722198486328, | |
| "rewards/margins": 18.430742263793945, | |
| "rewards/rejected": -16.210018157958984, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.18, | |
| "learning_rate": 2.9978586723768736e-08, | |
| "logits/chosen": 2.740478992462158, | |
| "logits/rejected": 3.258228302001953, | |
| "logps/chosen": -433.20782470703125, | |
| "logps/rejected": -437.18719482421875, | |
| "loss": 0.3487, | |
| "rewards/accuracies": 0.8999999761581421, | |
| "rewards/chosen": 2.553795576095581, | |
| "rewards/margins": 19.554113388061523, | |
| "rewards/rejected": -17.00031852722168, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.19, | |
| "learning_rate": 3.1049250535331906e-08, | |
| "logits/chosen": 2.1817595958709717, | |
| "logits/rejected": 3.1689507961273193, | |
| "logps/chosen": -440.03863525390625, | |
| "logps/rejected": -409.65240478515625, | |
| "loss": 0.3633, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": 1.1603565216064453, | |
| "rewards/margins": 17.322303771972656, | |
| "rewards/rejected": -16.161945343017578, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.19, | |
| "learning_rate": 3.211991434689507e-08, | |
| "logits/chosen": 2.570457935333252, | |
| "logits/rejected": 3.2502989768981934, | |
| "logps/chosen": -454.56890869140625, | |
| "logps/rejected": -416.1253356933594, | |
| "loss": 0.4447, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": 3.8409790992736816, | |
| "rewards/margins": 22.155860900878906, | |
| "rewards/rejected": -18.31488037109375, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.19, | |
| "eval_logits/chosen": 1.8170030117034912, | |
| "eval_logits/rejected": 2.5056939125061035, | |
| "eval_logps/chosen": -403.96087646484375, | |
| "eval_logps/rejected": -405.89984130859375, | |
| "eval_loss": 0.7104232311248779, | |
| "eval_rewards/accuracies": 0.9296875, | |
| "eval_rewards/chosen": 0.27033889293670654, | |
| "eval_rewards/margins": 18.656814575195312, | |
| "eval_rewards/rejected": -18.386476516723633, | |
| "eval_runtime": 76.9022, | |
| "eval_samples_per_second": 13.004, | |
| "eval_steps_per_second": 0.416, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.2, | |
| "learning_rate": 3.3190578158458246e-08, | |
| "logits/chosen": 2.6424567699432373, | |
| "logits/rejected": 3.5541133880615234, | |
| "logps/chosen": -389.58636474609375, | |
| "logps/rejected": -417.542236328125, | |
| "loss": 0.1892, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 2.047499179840088, | |
| "rewards/margins": 22.001834869384766, | |
| "rewards/rejected": -19.954334259033203, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.21, | |
| "learning_rate": 3.426124197002141e-08, | |
| "logits/chosen": 2.256901264190674, | |
| "logits/rejected": 3.4731545448303223, | |
| "logps/chosen": -414.03875732421875, | |
| "logps/rejected": -427.070556640625, | |
| "loss": 0.3266, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": 0.8677770495414734, | |
| "rewards/margins": 22.491207122802734, | |
| "rewards/rejected": -21.623430252075195, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.21, | |
| "learning_rate": 3.533190578158458e-08, | |
| "logits/chosen": 2.77763295173645, | |
| "logits/rejected": 3.112457752227783, | |
| "logps/chosen": -360.58135986328125, | |
| "logps/rejected": -414.3995666503906, | |
| "loss": 0.2844, | |
| "rewards/accuracies": 0.949999988079071, | |
| "rewards/chosen": 2.416537046432495, | |
| "rewards/margins": 22.439632415771484, | |
| "rewards/rejected": -20.023096084594727, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.22, | |
| "learning_rate": 3.640256959314775e-08, | |
| "logits/chosen": 2.5908098220825195, | |
| "logits/rejected": 3.282468795776367, | |
| "logps/chosen": -449.01922607421875, | |
| "logps/rejected": -422.73016357421875, | |
| "loss": 0.3479, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 2.354691505432129, | |
| "rewards/margins": 22.229778289794922, | |
| "rewards/rejected": -19.875089645385742, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.22, | |
| "learning_rate": 3.747323340471092e-08, | |
| "logits/chosen": 2.534389019012451, | |
| "logits/rejected": 3.5590693950653076, | |
| "logps/chosen": -405.7064514160156, | |
| "logps/rejected": -434.4012756347656, | |
| "loss": 0.4823, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": 1.6845805644989014, | |
| "rewards/margins": 23.965158462524414, | |
| "rewards/rejected": -22.280576705932617, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.23, | |
| "learning_rate": 3.854389721627409e-08, | |
| "logits/chosen": 2.5174474716186523, | |
| "logits/rejected": 3.512732744216919, | |
| "logps/chosen": -435.5006408691406, | |
| "logps/rejected": -422.19366455078125, | |
| "loss": 0.204, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 3.582836151123047, | |
| "rewards/margins": 24.59599494934082, | |
| "rewards/rejected": -21.013154983520508, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "learning_rate": 3.961456102783726e-08, | |
| "logits/chosen": 2.3562188148498535, | |
| "logits/rejected": 3.3465442657470703, | |
| "logps/chosen": -470.4930725097656, | |
| "logps/rejected": -405.6280822753906, | |
| "loss": 0.3122, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 1.8608239889144897, | |
| "rewards/margins": 23.556621551513672, | |
| "rewards/rejected": -21.6957950592041, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "learning_rate": 4.068522483940043e-08, | |
| "logits/chosen": 2.7507357597351074, | |
| "logits/rejected": 3.5644543170928955, | |
| "logps/chosen": -368.4208068847656, | |
| "logps/rejected": -416.85302734375, | |
| "loss": 0.2533, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": 1.0723613500595093, | |
| "rewards/margins": 24.53675651550293, | |
| "rewards/rejected": -23.464397430419922, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.25, | |
| "learning_rate": 4.175588865096359e-08, | |
| "logits/chosen": 2.4924309253692627, | |
| "logits/rejected": 3.5116946697235107, | |
| "logps/chosen": -383.086181640625, | |
| "logps/rejected": -417.8291931152344, | |
| "loss": 0.2178, | |
| "rewards/accuracies": 0.887499988079071, | |
| "rewards/chosen": 4.934910774230957, | |
| "rewards/margins": 26.8948974609375, | |
| "rewards/rejected": -21.959985733032227, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.26, | |
| "learning_rate": 4.282655246252677e-08, | |
| "logits/chosen": 2.5591559410095215, | |
| "logits/rejected": 3.345097780227661, | |
| "logps/chosen": -449.8941955566406, | |
| "logps/rejected": -431.88818359375, | |
| "loss": 0.2468, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 5.43572473526001, | |
| "rewards/margins": 28.937246322631836, | |
| "rewards/rejected": -23.501522064208984, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.26, | |
| "eval_logits/chosen": 1.8141372203826904, | |
| "eval_logits/rejected": 2.503908395767212, | |
| "eval_logps/chosen": -403.28955078125, | |
| "eval_logps/rejected": -406.5526123046875, | |
| "eval_loss": 0.5629487037658691, | |
| "eval_rewards/accuracies": 0.9140625, | |
| "eval_rewards/chosen": 3.626873731613159, | |
| "eval_rewards/margins": 25.27718162536621, | |
| "eval_rewards/rejected": -21.650310516357422, | |
| "eval_runtime": 76.66, | |
| "eval_samples_per_second": 13.045, | |
| "eval_steps_per_second": 0.417, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.26, | |
| "learning_rate": 4.389721627408993e-08, | |
| "logits/chosen": 2.353135585784912, | |
| "logits/rejected": 3.1894092559814453, | |
| "logps/chosen": -386.232666015625, | |
| "logps/rejected": -399.92218017578125, | |
| "loss": 0.1749, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 5.276652812957764, | |
| "rewards/margins": 27.45583724975586, | |
| "rewards/rejected": -22.17918586730957, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.27, | |
| "learning_rate": 4.49678800856531e-08, | |
| "logits/chosen": 1.99558424949646, | |
| "logits/rejected": 3.1743838787078857, | |
| "logps/chosen": -412.32110595703125, | |
| "logps/rejected": -421.9241638183594, | |
| "loss": 0.272, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 6.199848175048828, | |
| "rewards/margins": 27.825729370117188, | |
| "rewards/rejected": -21.62588119506836, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.28, | |
| "learning_rate": 4.603854389721627e-08, | |
| "logits/chosen": 2.605555772781372, | |
| "logits/rejected": 3.407613754272461, | |
| "logps/chosen": -434.2098083496094, | |
| "logps/rejected": -447.4507751464844, | |
| "loss": 0.3003, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 8.46845531463623, | |
| "rewards/margins": 32.25395584106445, | |
| "rewards/rejected": -23.785497665405273, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.28, | |
| "learning_rate": 4.710920770877944e-08, | |
| "logits/chosen": 2.1723690032958984, | |
| "logits/rejected": 3.847750186920166, | |
| "logps/chosen": -421.90155029296875, | |
| "logps/rejected": -414.0150451660156, | |
| "loss": 0.2367, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": 8.460034370422363, | |
| "rewards/margins": 31.530038833618164, | |
| "rewards/rejected": -23.070003509521484, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.29, | |
| "learning_rate": 4.817987152034261e-08, | |
| "logits/chosen": 2.460693120956421, | |
| "logits/rejected": 2.9349396228790283, | |
| "logps/chosen": -502.804931640625, | |
| "logps/rejected": -431.50335693359375, | |
| "loss": 0.2369, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 7.705231666564941, | |
| "rewards/margins": 32.00004959106445, | |
| "rewards/rejected": -24.294815063476562, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.3, | |
| "learning_rate": 4.925053533190578e-08, | |
| "logits/chosen": 2.333646297454834, | |
| "logits/rejected": 4.089064598083496, | |
| "logps/chosen": -422.8240661621094, | |
| "logps/rejected": -442.1726989746094, | |
| "loss": 0.1691, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 4.384838581085205, | |
| "rewards/margins": 29.339359283447266, | |
| "rewards/rejected": -24.954524993896484, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.3, | |
| "learning_rate": 5.032119914346895e-08, | |
| "logits/chosen": 2.252077579498291, | |
| "logits/rejected": 3.398280382156372, | |
| "logps/chosen": -426.0081481933594, | |
| "logps/rejected": -431.38623046875, | |
| "loss": 0.2549, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 3.1347286701202393, | |
| "rewards/margins": 29.128936767578125, | |
| "rewards/rejected": -25.994205474853516, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.31, | |
| "learning_rate": 5.139186295503212e-08, | |
| "logits/chosen": 1.9574702978134155, | |
| "logits/rejected": 3.0777688026428223, | |
| "logps/chosen": -445.21435546875, | |
| "logps/rejected": -431.9685974121094, | |
| "loss": 0.2022, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": 5.082431793212891, | |
| "rewards/margins": 33.55067825317383, | |
| "rewards/rejected": -28.468246459960938, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.31, | |
| "learning_rate": 5.246252676659528e-08, | |
| "logits/chosen": 2.4008655548095703, | |
| "logits/rejected": 3.1396138668060303, | |
| "logps/chosen": -396.2805480957031, | |
| "logps/rejected": -431.75994873046875, | |
| "loss": 0.1925, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 3.1321897506713867, | |
| "rewards/margins": 31.742828369140625, | |
| "rewards/rejected": -28.610637664794922, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "learning_rate": 5.353319057815846e-08, | |
| "logits/chosen": 2.6065423488616943, | |
| "logits/rejected": 3.01694917678833, | |
| "logps/chosen": -441.64752197265625, | |
| "logps/rejected": -450.52337646484375, | |
| "loss": 0.238, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": 5.9151458740234375, | |
| "rewards/margins": 37.43492889404297, | |
| "rewards/rejected": -31.5197811126709, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "eval_logits/chosen": 1.8356637954711914, | |
| "eval_logits/rejected": 2.518232822418213, | |
| "eval_logps/chosen": -403.12066650390625, | |
| "eval_logps/rejected": -407.75311279296875, | |
| "eval_loss": 0.40245920419692993, | |
| "eval_rewards/accuracies": 0.9296875, | |
| "eval_rewards/chosen": 4.471410751342773, | |
| "eval_rewards/margins": 32.124061584472656, | |
| "eval_rewards/rejected": -27.652652740478516, | |
| "eval_runtime": 76.7691, | |
| "eval_samples_per_second": 13.026, | |
| "eval_steps_per_second": 0.417, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.33, | |
| "learning_rate": 5.460385438972163e-08, | |
| "logits/chosen": 2.370246410369873, | |
| "logits/rejected": 3.2074241638183594, | |
| "logps/chosen": -463.4814453125, | |
| "logps/rejected": -450.365478515625, | |
| "loss": 0.1896, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 4.760863304138184, | |
| "rewards/margins": 35.59904098510742, | |
| "rewards/rejected": -30.838184356689453, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.33, | |
| "learning_rate": 5.567451820128479e-08, | |
| "logits/chosen": 2.4717459678649902, | |
| "logits/rejected": 3.241302967071533, | |
| "logps/chosen": -476.26690673828125, | |
| "logps/rejected": -425.7784729003906, | |
| "loss": 0.2277, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": 5.998467922210693, | |
| "rewards/margins": 37.100624084472656, | |
| "rewards/rejected": -31.102157592773438, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.34, | |
| "learning_rate": 5.6745182012847956e-08, | |
| "logits/chosen": 2.8885834217071533, | |
| "logits/rejected": 3.374998092651367, | |
| "logps/chosen": -399.87249755859375, | |
| "logps/rejected": -406.6166687011719, | |
| "loss": 0.2215, | |
| "rewards/accuracies": 0.9125000238418579, | |
| "rewards/chosen": 5.892712116241455, | |
| "rewards/margins": 36.44768524169922, | |
| "rewards/rejected": -30.554973602294922, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.35, | |
| "learning_rate": 5.781584582441114e-08, | |
| "logits/chosen": 2.536708116531372, | |
| "logits/rejected": 3.2403769493103027, | |
| "logps/chosen": -429.4972229003906, | |
| "logps/rejected": -423.7460021972656, | |
| "loss": 0.1918, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 7.94210958480835, | |
| "rewards/margins": 36.776371002197266, | |
| "rewards/rejected": -28.83426284790039, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.35, | |
| "learning_rate": 5.88865096359743e-08, | |
| "logits/chosen": 1.9649451971054077, | |
| "logits/rejected": 3.1596691608428955, | |
| "logps/chosen": -472.22119140625, | |
| "logps/rejected": -425.32763671875, | |
| "loss": 0.15, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 12.239465713500977, | |
| "rewards/margins": 48.21662139892578, | |
| "rewards/rejected": -35.977149963378906, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.36, | |
| "learning_rate": 5.995717344753747e-08, | |
| "logits/chosen": 3.032383441925049, | |
| "logits/rejected": 3.257586717605591, | |
| "logps/chosen": -416.17071533203125, | |
| "logps/rejected": -410.1018981933594, | |
| "loss": 0.1132, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 7.306866645812988, | |
| "rewards/margins": 37.124568939208984, | |
| "rewards/rejected": -29.81770896911621, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.37, | |
| "learning_rate": 6.102783725910064e-08, | |
| "logits/chosen": 2.7644500732421875, | |
| "logits/rejected": 3.5682806968688965, | |
| "logps/chosen": -383.52093505859375, | |
| "logps/rejected": -394.8160705566406, | |
| "loss": 0.2133, | |
| "rewards/accuracies": 0.949999988079071, | |
| "rewards/chosen": 5.922687530517578, | |
| "rewards/margins": 37.88574981689453, | |
| "rewards/rejected": -31.963062286376953, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.37, | |
| "learning_rate": 6.209850107066381e-08, | |
| "logits/chosen": 2.341892957687378, | |
| "logits/rejected": 3.260730028152466, | |
| "logps/chosen": -394.4947814941406, | |
| "logps/rejected": -405.29498291015625, | |
| "loss": 0.082, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": 6.424149990081787, | |
| "rewards/margins": 38.39518737792969, | |
| "rewards/rejected": -31.971033096313477, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.38, | |
| "learning_rate": 6.316916488222698e-08, | |
| "logits/chosen": 2.526381015777588, | |
| "logits/rejected": 3.192876100540161, | |
| "logps/chosen": -426.3685607910156, | |
| "logps/rejected": -403.46759033203125, | |
| "loss": 0.1342, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 3.9063773155212402, | |
| "rewards/margins": 39.68169021606445, | |
| "rewards/rejected": -35.77531814575195, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.39, | |
| "learning_rate": 6.423982869379014e-08, | |
| "logits/chosen": 2.456986427307129, | |
| "logits/rejected": 2.974177837371826, | |
| "logps/chosen": -430.700927734375, | |
| "logps/rejected": -460.1724548339844, | |
| "loss": 0.0835, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 2.9916865825653076, | |
| "rewards/margins": 47.707908630371094, | |
| "rewards/rejected": -44.716224670410156, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.39, | |
| "eval_logits/chosen": 1.8376859426498413, | |
| "eval_logits/rejected": 2.519270658493042, | |
| "eval_logps/chosen": -404.12213134765625, | |
| "eval_logps/rejected": -409.9280700683594, | |
| "eval_loss": 0.5330207943916321, | |
| "eval_rewards/accuracies": 0.9453125, | |
| "eval_rewards/chosen": -0.5357855558395386, | |
| "eval_rewards/margins": 37.99186325073242, | |
| "eval_rewards/rejected": -38.52764892578125, | |
| "eval_runtime": 76.8308, | |
| "eval_samples_per_second": 13.016, | |
| "eval_steps_per_second": 0.416, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.39, | |
| "learning_rate": 6.531049250535332e-08, | |
| "logits/chosen": 2.4591498374938965, | |
| "logits/rejected": 3.2855117321014404, | |
| "logps/chosen": -401.3484802246094, | |
| "logps/rejected": -434.5301208496094, | |
| "loss": 0.142, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": -0.02256155014038086, | |
| "rewards/margins": 46.42057418823242, | |
| "rewards/rejected": -46.443138122558594, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "learning_rate": 6.638115631691649e-08, | |
| "logits/chosen": 2.539071559906006, | |
| "logits/rejected": 3.2421278953552246, | |
| "logps/chosen": -442.18243408203125, | |
| "logps/rejected": -444.18878173828125, | |
| "loss": 0.1058, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.043628692626953, | |
| "rewards/margins": 43.48805618286133, | |
| "rewards/rejected": -41.444427490234375, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "learning_rate": 6.745182012847965e-08, | |
| "logits/chosen": 2.5688178539276123, | |
| "logits/rejected": 3.445737838745117, | |
| "logps/chosen": -411.5978088378906, | |
| "logps/rejected": -407.0299072265625, | |
| "loss": 0.1525, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": 1.022148609161377, | |
| "rewards/margins": 39.893680572509766, | |
| "rewards/rejected": -38.87153244018555, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.41, | |
| "learning_rate": 6.852248394004282e-08, | |
| "logits/chosen": 2.5901875495910645, | |
| "logits/rejected": 2.911647319793701, | |
| "logps/chosen": -420.1778869628906, | |
| "logps/rejected": -382.272216796875, | |
| "loss": 0.2718, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 3.5251822471618652, | |
| "rewards/margins": 41.309791564941406, | |
| "rewards/rejected": -37.784610748291016, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.42, | |
| "learning_rate": 6.9593147751606e-08, | |
| "logits/chosen": 2.287266492843628, | |
| "logits/rejected": 3.33722186088562, | |
| "logps/chosen": -452.6976013183594, | |
| "logps/rejected": -439.2931213378906, | |
| "loss": 0.0169, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 6.000566482543945, | |
| "rewards/margins": 46.02397918701172, | |
| "rewards/rejected": -40.023414611816406, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.42, | |
| "learning_rate": 7.066381156316916e-08, | |
| "logits/chosen": 2.2046732902526855, | |
| "logits/rejected": 3.6667628288269043, | |
| "logps/chosen": -403.92205810546875, | |
| "logps/rejected": -408.7096862792969, | |
| "loss": 0.2138, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 6.08463191986084, | |
| "rewards/margins": 44.13505554199219, | |
| "rewards/rejected": -38.0504264831543, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.43, | |
| "learning_rate": 7.173447537473233e-08, | |
| "logits/chosen": 2.4322450160980225, | |
| "logits/rejected": 3.5664360523223877, | |
| "logps/chosen": -416.0870056152344, | |
| "logps/rejected": -415.59197998046875, | |
| "loss": 0.1237, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 4.357824325561523, | |
| "rewards/margins": 44.89923858642578, | |
| "rewards/rejected": -40.541419982910156, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.44, | |
| "learning_rate": 7.28051391862955e-08, | |
| "logits/chosen": 2.468834638595581, | |
| "logits/rejected": 3.187020778656006, | |
| "logps/chosen": -395.1986083984375, | |
| "logps/rejected": -432.7027282714844, | |
| "loss": 0.1312, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 7.26413631439209, | |
| "rewards/margins": 49.74661636352539, | |
| "rewards/rejected": -42.48247528076172, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.44, | |
| "learning_rate": 7.387580299785867e-08, | |
| "logits/chosen": 2.341510772705078, | |
| "logits/rejected": 3.5096206665039062, | |
| "logps/chosen": -391.4788513183594, | |
| "logps/rejected": -392.6119384765625, | |
| "loss": 0.1998, | |
| "rewards/accuracies": 0.949999988079071, | |
| "rewards/chosen": 6.877418518066406, | |
| "rewards/margins": 45.81779479980469, | |
| "rewards/rejected": -38.94037628173828, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.45, | |
| "learning_rate": 7.494646680942184e-08, | |
| "logits/chosen": 2.7313547134399414, | |
| "logits/rejected": 3.659938097000122, | |
| "logps/chosen": -375.4259033203125, | |
| "logps/rejected": -431.1378479003906, | |
| "loss": 0.1294, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 4.9859724044799805, | |
| "rewards/margins": 47.63801574707031, | |
| "rewards/rejected": -42.65203857421875, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.45, | |
| "eval_logits/chosen": 1.8345167636871338, | |
| "eval_logits/rejected": 2.522487163543701, | |
| "eval_logps/chosen": -403.2203674316406, | |
| "eval_logps/rejected": -410.374755859375, | |
| "eval_loss": 0.40866366028785706, | |
| "eval_rewards/accuracies": 0.953125, | |
| "eval_rewards/chosen": 3.9727911949157715, | |
| "eval_rewards/margins": 44.73374938964844, | |
| "eval_rewards/rejected": -40.760955810546875, | |
| "eval_runtime": 76.9261, | |
| "eval_samples_per_second": 12.999, | |
| "eval_steps_per_second": 0.416, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.46, | |
| "learning_rate": 7.601713062098501e-08, | |
| "logits/chosen": 2.3793327808380127, | |
| "logits/rejected": 3.142347574234009, | |
| "logps/chosen": -423.20880126953125, | |
| "logps/rejected": -401.803955078125, | |
| "loss": 0.1473, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 8.15241527557373, | |
| "rewards/margins": 47.408851623535156, | |
| "rewards/rejected": -39.256431579589844, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.46, | |
| "learning_rate": 7.708779443254818e-08, | |
| "logits/chosen": 2.7316575050354004, | |
| "logits/rejected": 3.029636859893799, | |
| "logps/chosen": -434.23480224609375, | |
| "logps/rejected": -403.28717041015625, | |
| "loss": 0.1684, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 10.977544784545898, | |
| "rewards/margins": 50.05615234375, | |
| "rewards/rejected": -39.07860565185547, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.47, | |
| "learning_rate": 7.815845824411135e-08, | |
| "logits/chosen": 2.5688631534576416, | |
| "logits/rejected": 3.0123863220214844, | |
| "logps/chosen": -449.0638732910156, | |
| "logps/rejected": -398.37152099609375, | |
| "loss": 0.0823, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 8.119828224182129, | |
| "rewards/margins": 45.32478332519531, | |
| "rewards/rejected": -37.2049560546875, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "learning_rate": 7.922912205567452e-08, | |
| "logits/chosen": 2.6267566680908203, | |
| "logits/rejected": 3.3151562213897705, | |
| "logps/chosen": -390.72698974609375, | |
| "logps/rejected": -381.4679260253906, | |
| "loss": 0.2223, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 6.810037136077881, | |
| "rewards/margins": 40.27370071411133, | |
| "rewards/rejected": -33.46365737915039, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "learning_rate": 8.029978586723767e-08, | |
| "logits/chosen": 2.355883836746216, | |
| "logits/rejected": 3.5080723762512207, | |
| "logps/chosen": -395.70184326171875, | |
| "logps/rejected": -403.0619812011719, | |
| "loss": 0.1436, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 9.782814025878906, | |
| "rewards/margins": 49.49211883544922, | |
| "rewards/rejected": -39.70930480957031, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.49, | |
| "learning_rate": 8.137044967880086e-08, | |
| "logits/chosen": 2.538801670074463, | |
| "logits/rejected": 2.962019205093384, | |
| "logps/chosen": -395.83062744140625, | |
| "logps/rejected": -439.6985778808594, | |
| "loss": 0.1789, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 6.836484432220459, | |
| "rewards/margins": 59.103851318359375, | |
| "rewards/rejected": -52.267372131347656, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.49, | |
| "learning_rate": 8.244111349036403e-08, | |
| "logits/chosen": 2.372373104095459, | |
| "logits/rejected": 3.526844024658203, | |
| "logps/chosen": -391.49090576171875, | |
| "logps/rejected": -436.17193603515625, | |
| "loss": 0.1816, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 1.2613162994384766, | |
| "rewards/margins": 54.581298828125, | |
| "rewards/rejected": -53.31998825073242, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.5, | |
| "learning_rate": 8.351177730192718e-08, | |
| "logits/chosen": 2.641442060470581, | |
| "logits/rejected": 3.504525661468506, | |
| "logps/chosen": -429.4436950683594, | |
| "logps/rejected": -426.1357421875, | |
| "loss": 0.0799, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": -0.4186324179172516, | |
| "rewards/margins": 56.703216552734375, | |
| "rewards/rejected": -57.121849060058594, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.51, | |
| "learning_rate": 8.458244111349035e-08, | |
| "logits/chosen": 2.8256888389587402, | |
| "logits/rejected": 3.2680656909942627, | |
| "logps/chosen": -432.7039489746094, | |
| "logps/rejected": -435.9918518066406, | |
| "loss": 0.1115, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.19469785690307617, | |
| "rewards/margins": 58.439208984375, | |
| "rewards/rejected": -58.2445182800293, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 0.51, | |
| "learning_rate": 8.565310492505354e-08, | |
| "logits/chosen": 2.2154085636138916, | |
| "logits/rejected": 3.348832607269287, | |
| "logps/chosen": -443.20269775390625, | |
| "logps/rejected": -411.3150939941406, | |
| "loss": 0.2809, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 7.068073272705078, | |
| "rewards/margins": 56.71687698364258, | |
| "rewards/rejected": -49.648807525634766, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.51, | |
| "eval_logits/chosen": 1.8377827405929565, | |
| "eval_logits/rejected": 2.5198471546173096, | |
| "eval_logps/chosen": -403.1432189941406, | |
| "eval_logps/rejected": -412.51531982421875, | |
| "eval_loss": 0.5127639770507812, | |
| "eval_rewards/accuracies": 0.953125, | |
| "eval_rewards/chosen": 4.35873556137085, | |
| "eval_rewards/margins": 55.822689056396484, | |
| "eval_rewards/rejected": -51.463958740234375, | |
| "eval_runtime": 76.8497, | |
| "eval_samples_per_second": 13.012, | |
| "eval_steps_per_second": 0.416, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.52, | |
| "learning_rate": 8.672376873661669e-08, | |
| "logits/chosen": 2.567979335784912, | |
| "logits/rejected": 3.7970213890075684, | |
| "logps/chosen": -379.3563232421875, | |
| "logps/rejected": -417.9363708496094, | |
| "loss": 0.1456, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 6.566949367523193, | |
| "rewards/margins": 58.88117218017578, | |
| "rewards/rejected": -52.31422805786133, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 0.53, | |
| "learning_rate": 8.779443254817986e-08, | |
| "logits/chosen": 2.987666130065918, | |
| "logits/rejected": 3.0821828842163086, | |
| "logps/chosen": -375.0899353027344, | |
| "logps/rejected": -380.730712890625, | |
| "loss": 0.113, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 4.425510406494141, | |
| "rewards/margins": 57.55885696411133, | |
| "rewards/rejected": -53.13335037231445, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 0.53, | |
| "learning_rate": 8.886509635974304e-08, | |
| "logits/chosen": 2.3225650787353516, | |
| "logits/rejected": 3.374905824661255, | |
| "logps/chosen": -404.91314697265625, | |
| "logps/rejected": -406.26690673828125, | |
| "loss": 0.221, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 0.956459641456604, | |
| "rewards/margins": 58.1107177734375, | |
| "rewards/rejected": -57.15425491333008, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 0.54, | |
| "learning_rate": 8.99357601713062e-08, | |
| "logits/chosen": 2.3407421112060547, | |
| "logits/rejected": 3.029287815093994, | |
| "logps/chosen": -389.4285583496094, | |
| "logps/rejected": -422.2571716308594, | |
| "loss": 0.123, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 5.533617973327637, | |
| "rewards/margins": 57.36924362182617, | |
| "rewards/rejected": -51.83562469482422, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 0.55, | |
| "learning_rate": 9.100642398286937e-08, | |
| "logits/chosen": 2.4234728813171387, | |
| "logits/rejected": 3.6180338859558105, | |
| "logps/chosen": -384.3294982910156, | |
| "logps/rejected": -382.1348876953125, | |
| "loss": 0.1292, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 5.42165994644165, | |
| "rewards/margins": 53.802391052246094, | |
| "rewards/rejected": -48.38072967529297, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.55, | |
| "learning_rate": 9.207708779443254e-08, | |
| "logits/chosen": 2.336247444152832, | |
| "logits/rejected": 3.8205437660217285, | |
| "logps/chosen": -427.49468994140625, | |
| "logps/rejected": -434.2718811035156, | |
| "loss": 0.0955, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 17.532028198242188, | |
| "rewards/margins": 70.969482421875, | |
| "rewards/rejected": -53.437461853027344, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 0.56, | |
| "learning_rate": 9.314775160599571e-08, | |
| "logits/chosen": 2.201402187347412, | |
| "logits/rejected": 3.6681246757507324, | |
| "logps/chosen": -437.94091796875, | |
| "logps/rejected": -429.28021240234375, | |
| "loss": 0.0989, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 10.197607040405273, | |
| "rewards/margins": 63.349510192871094, | |
| "rewards/rejected": -53.15190505981445, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 0.57, | |
| "learning_rate": 9.421841541755888e-08, | |
| "logits/chosen": 2.64699125289917, | |
| "logits/rejected": 2.797672986984253, | |
| "logps/chosen": -419.5973205566406, | |
| "logps/rejected": -427.1859436035156, | |
| "loss": 0.2619, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 5.267810344696045, | |
| "rewards/margins": 64.8418197631836, | |
| "rewards/rejected": -59.573997497558594, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 0.57, | |
| "learning_rate": 9.528907922912205e-08, | |
| "logits/chosen": 2.1525919437408447, | |
| "logits/rejected": 3.06669545173645, | |
| "logps/chosen": -447.0118103027344, | |
| "logps/rejected": -425.5939025878906, | |
| "loss": 0.1568, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 12.983186721801758, | |
| "rewards/margins": 62.67449188232422, | |
| "rewards/rejected": -49.69130325317383, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 0.58, | |
| "learning_rate": 9.635974304068522e-08, | |
| "logits/chosen": 2.4299418926239014, | |
| "logits/rejected": 3.4242959022521973, | |
| "logps/chosen": -390.6571350097656, | |
| "logps/rejected": -455.71331787109375, | |
| "loss": 0.2051, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 5.089010715484619, | |
| "rewards/margins": 74.85060119628906, | |
| "rewards/rejected": -69.76158905029297, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.58, | |
| "eval_logits/chosen": 1.8021771907806396, | |
| "eval_logits/rejected": 2.491039752960205, | |
| "eval_logps/chosen": -400.8381042480469, | |
| "eval_logps/rejected": -412.0938720703125, | |
| "eval_loss": 0.5601515173912048, | |
| "eval_rewards/accuracies": 0.9375, | |
| "eval_rewards/chosen": 15.884079933166504, | |
| "eval_rewards/margins": 65.24065399169922, | |
| "eval_rewards/rejected": -49.3565673828125, | |
| "eval_runtime": 77.0047, | |
| "eval_samples_per_second": 12.986, | |
| "eval_steps_per_second": 0.416, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.58, | |
| "learning_rate": 9.743040685224839e-08, | |
| "logits/chosen": 2.6785802841186523, | |
| "logits/rejected": 3.158878803253174, | |
| "logps/chosen": -364.9392395019531, | |
| "logps/rejected": -421.75604248046875, | |
| "loss": 0.1121, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 15.941950798034668, | |
| "rewards/margins": 70.44316101074219, | |
| "rewards/rejected": -54.501220703125, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 0.59, | |
| "learning_rate": 9.850107066381156e-08, | |
| "logits/chosen": 2.1345481872558594, | |
| "logits/rejected": 3.1029703617095947, | |
| "logps/chosen": -467.07318115234375, | |
| "logps/rejected": -428.45220947265625, | |
| "loss": 0.2077, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 8.983004570007324, | |
| "rewards/margins": 70.48181915283203, | |
| "rewards/rejected": -61.49879837036133, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "learning_rate": 9.957173447537473e-08, | |
| "logits/chosen": 2.542916774749756, | |
| "logits/rejected": 3.5035908222198486, | |
| "logps/chosen": -423.0205993652344, | |
| "logps/rejected": -416.65509033203125, | |
| "loss": 0.2814, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 5.553844928741455, | |
| "rewards/margins": 68.67607116699219, | |
| "rewards/rejected": -63.122222900390625, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "learning_rate": 9.992858843132586e-08, | |
| "logits/chosen": 2.491159439086914, | |
| "logits/rejected": 3.259873867034912, | |
| "logps/chosen": -475.2666931152344, | |
| "logps/rejected": -461.07061767578125, | |
| "loss": 0.1522, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 4.259446144104004, | |
| "rewards/margins": 78.9701919555664, | |
| "rewards/rejected": -74.71073913574219, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 0.61, | |
| "learning_rate": 9.980956915020233e-08, | |
| "logits/chosen": 2.68623423576355, | |
| "logits/rejected": 3.0718812942504883, | |
| "logps/chosen": -361.89483642578125, | |
| "logps/rejected": -413.39288330078125, | |
| "loss": 0.3246, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 8.68491268157959, | |
| "rewards/margins": 75.4022445678711, | |
| "rewards/rejected": -66.71733093261719, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.62, | |
| "learning_rate": 9.969054986907879e-08, | |
| "logits/chosen": 2.752331495285034, | |
| "logits/rejected": 3.481398344039917, | |
| "logps/chosen": -392.8135986328125, | |
| "logps/rejected": -413.65313720703125, | |
| "loss": 0.2217, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.2605032920837402, | |
| "rewards/margins": 72.24468994140625, | |
| "rewards/rejected": -68.98418426513672, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 0.62, | |
| "learning_rate": 9.957153058795524e-08, | |
| "logits/chosen": 2.0877063274383545, | |
| "logits/rejected": 3.1967997550964355, | |
| "logps/chosen": -398.50030517578125, | |
| "logps/rejected": -458.2386779785156, | |
| "loss": 0.1414, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 4.164697170257568, | |
| "rewards/margins": 79.06629943847656, | |
| "rewards/rejected": -74.90159606933594, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 0.63, | |
| "learning_rate": 9.94525113068317e-08, | |
| "logits/chosen": 2.6983773708343506, | |
| "logits/rejected": 3.458531141281128, | |
| "logps/chosen": -400.029296875, | |
| "logps/rejected": -411.06903076171875, | |
| "loss": 0.2352, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 4.437505722045898, | |
| "rewards/margins": 78.08453369140625, | |
| "rewards/rejected": -73.64701843261719, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 0.64, | |
| "learning_rate": 9.933349202570817e-08, | |
| "logits/chosen": 2.2401540279388428, | |
| "logits/rejected": 3.9224307537078857, | |
| "logps/chosen": -416.910400390625, | |
| "logps/rejected": -441.75482177734375, | |
| "loss": 0.1331, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 7.2108893394470215, | |
| "rewards/margins": 89.5989990234375, | |
| "rewards/rejected": -82.38809967041016, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 0.64, | |
| "learning_rate": 9.921447274458463e-08, | |
| "logits/chosen": 2.333087682723999, | |
| "logits/rejected": 3.3145065307617188, | |
| "logps/chosen": -430.5704650878906, | |
| "logps/rejected": -428.00384521484375, | |
| "loss": 0.2009, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -3.077216863632202, | |
| "rewards/margins": 81.33201599121094, | |
| "rewards/rejected": -84.40922546386719, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.64, | |
| "eval_logits/chosen": 1.85965096950531, | |
| "eval_logits/rejected": 2.5712876319885254, | |
| "eval_logps/chosen": -405.8722839355469, | |
| "eval_logps/rejected": -420.0833740234375, | |
| "eval_loss": 0.873622715473175, | |
| "eval_rewards/accuracies": 0.9296875, | |
| "eval_rewards/chosen": -9.286641120910645, | |
| "eval_rewards/margins": 80.01751708984375, | |
| "eval_rewards/rejected": -89.30416107177734, | |
| "eval_runtime": 76.9649, | |
| "eval_samples_per_second": 12.993, | |
| "eval_steps_per_second": 0.416, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.65, | |
| "learning_rate": 9.909545346346108e-08, | |
| "logits/chosen": 2.4723129272460938, | |
| "logits/rejected": 3.478045701980591, | |
| "logps/chosen": -388.63848876953125, | |
| "logps/rejected": -405.98748779296875, | |
| "loss": 0.3397, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 3.27624773979187, | |
| "rewards/margins": 83.57379150390625, | |
| "rewards/rejected": -80.29755401611328, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 0.66, | |
| "learning_rate": 9.897643418233753e-08, | |
| "logits/chosen": 2.1651980876922607, | |
| "logits/rejected": 3.202363967895508, | |
| "logps/chosen": -439.97894287109375, | |
| "logps/rejected": -464.5369567871094, | |
| "loss": 0.296, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 9.399145126342773, | |
| "rewards/margins": 104.0501480102539, | |
| "rewards/rejected": -94.65100860595703, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 0.66, | |
| "learning_rate": 9.885741490121398e-08, | |
| "logits/chosen": 2.6700689792633057, | |
| "logits/rejected": 3.753943681716919, | |
| "logps/chosen": -412.857666015625, | |
| "logps/rejected": -408.2137756347656, | |
| "loss": 0.267, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 17.997615814208984, | |
| "rewards/margins": 86.39213562011719, | |
| "rewards/rejected": -68.39451599121094, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 0.67, | |
| "learning_rate": 9.873839562009045e-08, | |
| "logits/chosen": 2.110105514526367, | |
| "logits/rejected": 3.5830421447753906, | |
| "logps/chosen": -426.0606994628906, | |
| "logps/rejected": -430.07073974609375, | |
| "loss": 0.1166, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 16.306238174438477, | |
| "rewards/margins": 97.02738952636719, | |
| "rewards/rejected": -80.72114562988281, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 0.67, | |
| "learning_rate": 9.861937633896691e-08, | |
| "logits/chosen": 2.830747127532959, | |
| "logits/rejected": 3.9411635398864746, | |
| "logps/chosen": -391.10858154296875, | |
| "logps/rejected": -417.4032287597656, | |
| "loss": 0.3818, | |
| "rewards/accuracies": 0.949999988079071, | |
| "rewards/chosen": 4.93004846572876, | |
| "rewards/margins": 88.80459594726562, | |
| "rewards/rejected": -83.87454223632812, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.68, | |
| "learning_rate": 9.850035705784336e-08, | |
| "logits/chosen": 2.3416190147399902, | |
| "logits/rejected": 3.0985264778137207, | |
| "logps/chosen": -461.04718017578125, | |
| "logps/rejected": -448.1722106933594, | |
| "loss": 0.0969, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 14.76380443572998, | |
| "rewards/margins": 97.76150512695312, | |
| "rewards/rejected": -82.99769592285156, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 0.69, | |
| "learning_rate": 9.838133777671982e-08, | |
| "logits/chosen": 2.414294719696045, | |
| "logits/rejected": 3.4742074012756348, | |
| "logps/chosen": -384.5827941894531, | |
| "logps/rejected": -398.83050537109375, | |
| "loss": 0.4072, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": 10.109460830688477, | |
| "rewards/margins": 92.21754455566406, | |
| "rewards/rejected": -82.10807037353516, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 0.69, | |
| "learning_rate": 9.826231849559629e-08, | |
| "logits/chosen": 2.7465944290161133, | |
| "logits/rejected": 3.1911470890045166, | |
| "logps/chosen": -422.5624084472656, | |
| "logps/rejected": -440.286376953125, | |
| "loss": 0.1751, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 17.269311904907227, | |
| "rewards/margins": 104.30892181396484, | |
| "rewards/rejected": -87.03959655761719, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 0.7, | |
| "learning_rate": 9.814329921447275e-08, | |
| "logits/chosen": 2.822819232940674, | |
| "logits/rejected": 3.005201816558838, | |
| "logps/chosen": -391.0602111816406, | |
| "logps/rejected": -419.813720703125, | |
| "loss": 0.3021, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 13.785235404968262, | |
| "rewards/margins": 121.23738098144531, | |
| "rewards/rejected": -107.45216369628906, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 0.71, | |
| "learning_rate": 9.80242799333492e-08, | |
| "logits/chosen": 2.8149497509002686, | |
| "logits/rejected": 3.3617329597473145, | |
| "logps/chosen": -402.799560546875, | |
| "logps/rejected": -425.45343017578125, | |
| "loss": 0.2838, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": 14.5247163772583, | |
| "rewards/margins": 105.98973083496094, | |
| "rewards/rejected": -91.46500396728516, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.71, | |
| "eval_logits/chosen": 1.8558049201965332, | |
| "eval_logits/rejected": 2.5366647243499756, | |
| "eval_logps/chosen": -402.8280944824219, | |
| "eval_logps/rejected": -421.0693054199219, | |
| "eval_loss": 0.8600718975067139, | |
| "eval_rewards/accuracies": 0.953125, | |
| "eval_rewards/chosen": 5.934433460235596, | |
| "eval_rewards/margins": 100.16818237304688, | |
| "eval_rewards/rejected": -94.23374938964844, | |
| "eval_runtime": 76.8109, | |
| "eval_samples_per_second": 13.019, | |
| "eval_steps_per_second": 0.417, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.71, | |
| "learning_rate": 9.790526065222565e-08, | |
| "logits/chosen": 2.653266429901123, | |
| "logits/rejected": 3.1961002349853516, | |
| "logps/chosen": -445.80322265625, | |
| "logps/rejected": -429.64373779296875, | |
| "loss": 0.5293, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 21.07932472229004, | |
| "rewards/margins": 100.654052734375, | |
| "rewards/rejected": -79.57472229003906, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 0.72, | |
| "learning_rate": 9.778624137110211e-08, | |
| "logits/chosen": 2.4288971424102783, | |
| "logits/rejected": 3.4981236457824707, | |
| "logps/chosen": -452.72967529296875, | |
| "logps/rejected": -437.428955078125, | |
| "loss": 0.1353, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 19.18131446838379, | |
| "rewards/margins": 117.1941146850586, | |
| "rewards/rejected": -98.01280212402344, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 0.73, | |
| "learning_rate": 9.766722208997857e-08, | |
| "logits/chosen": 2.525935173034668, | |
| "logits/rejected": 3.092148780822754, | |
| "logps/chosen": -402.27886962890625, | |
| "logps/rejected": -428.43280029296875, | |
| "loss": 0.1894, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 20.851654052734375, | |
| "rewards/margins": 118.8115234375, | |
| "rewards/rejected": -97.95986938476562, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 0.73, | |
| "learning_rate": 9.754820280885503e-08, | |
| "logits/chosen": 2.715815782546997, | |
| "logits/rejected": 4.007362365722656, | |
| "logps/chosen": -399.8186340332031, | |
| "logps/rejected": -423.14862060546875, | |
| "loss": 0.1684, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 9.72397232055664, | |
| "rewards/margins": 105.40447998046875, | |
| "rewards/rejected": -95.68050384521484, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 0.74, | |
| "learning_rate": 9.742918352773148e-08, | |
| "logits/chosen": 2.2262580394744873, | |
| "logits/rejected": 3.2408432960510254, | |
| "logps/chosen": -477.47711181640625, | |
| "logps/rejected": -495.76043701171875, | |
| "loss": 0.1725, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 6.365964412689209, | |
| "rewards/margins": 132.43780517578125, | |
| "rewards/rejected": -126.07185363769531, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 0.75, | |
| "learning_rate": 9.731016424660795e-08, | |
| "logits/chosen": 2.7380359172821045, | |
| "logits/rejected": 3.5110669136047363, | |
| "logps/chosen": -398.9952392578125, | |
| "logps/rejected": -426.838623046875, | |
| "loss": 0.2338, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 12.770757675170898, | |
| "rewards/margins": 123.94891357421875, | |
| "rewards/rejected": -111.17814636230469, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 0.75, | |
| "learning_rate": 9.719114496548441e-08, | |
| "logits/chosen": 2.0883517265319824, | |
| "logits/rejected": 3.5194995403289795, | |
| "logps/chosen": -424.92803955078125, | |
| "logps/rejected": -390.85784912109375, | |
| "loss": 0.3672, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 9.357166290283203, | |
| "rewards/margins": 103.42280578613281, | |
| "rewards/rejected": -94.06563568115234, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 0.76, | |
| "learning_rate": 9.707212568436087e-08, | |
| "logits/chosen": 2.170081615447998, | |
| "logits/rejected": 3.3753578662872314, | |
| "logps/chosen": -437.91082763671875, | |
| "logps/rejected": -430.90966796875, | |
| "loss": 0.0796, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 8.644025802612305, | |
| "rewards/margins": 108.9352798461914, | |
| "rewards/rejected": -100.2912368774414, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 0.76, | |
| "learning_rate": 9.695310640323732e-08, | |
| "logits/chosen": 2.7604737281799316, | |
| "logits/rejected": 3.7521843910217285, | |
| "logps/chosen": -370.9176025390625, | |
| "logps/rejected": -435.0440979003906, | |
| "loss": 0.2187, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 10.98758316040039, | |
| "rewards/margins": 134.98072814941406, | |
| "rewards/rejected": -123.9931411743164, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 0.77, | |
| "learning_rate": 9.683408712211378e-08, | |
| "logits/chosen": 2.3693931102752686, | |
| "logits/rejected": 3.17862606048584, | |
| "logps/chosen": -467.9139099121094, | |
| "logps/rejected": -460.11297607421875, | |
| "loss": 0.3489, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 3.4328982830047607, | |
| "rewards/margins": 135.82644653320312, | |
| "rewards/rejected": -132.3935546875, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.77, | |
| "eval_logits/chosen": 1.8742191791534424, | |
| "eval_logits/rejected": 2.560530662536621, | |
| "eval_logps/chosen": -404.9606628417969, | |
| "eval_logps/rejected": -424.3955383300781, | |
| "eval_loss": 1.1546683311462402, | |
| "eval_rewards/accuracies": 0.9453125, | |
| "eval_rewards/chosen": -4.728623390197754, | |
| "eval_rewards/margins": 106.13644409179688, | |
| "eval_rewards/rejected": -110.86505889892578, | |
| "eval_runtime": 77.052, | |
| "eval_samples_per_second": 12.978, | |
| "eval_steps_per_second": 0.415, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.78, | |
| "learning_rate": 9.671506784099024e-08, | |
| "logits/chosen": 2.997239589691162, | |
| "logits/rejected": 3.5606212615966797, | |
| "logps/chosen": -392.54229736328125, | |
| "logps/rejected": -423.11322021484375, | |
| "loss": 0.0937, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": -0.05036654323339462, | |
| "rewards/margins": 117.02787780761719, | |
| "rewards/rejected": -117.07823181152344, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 0.78, | |
| "learning_rate": 9.659604855986669e-08, | |
| "logits/chosen": 2.1404788494110107, | |
| "logits/rejected": 3.323446273803711, | |
| "logps/chosen": -427.2289123535156, | |
| "logps/rejected": -433.6624450683594, | |
| "loss": 0.2752, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 12.247556686401367, | |
| "rewards/margins": 107.3965835571289, | |
| "rewards/rejected": -95.14904022216797, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 0.79, | |
| "learning_rate": 9.647702927874315e-08, | |
| "logits/chosen": 2.563352346420288, | |
| "logits/rejected": 3.0540013313293457, | |
| "logps/chosen": -447.8446350097656, | |
| "logps/rejected": -452.87969970703125, | |
| "loss": 0.0435, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 23.387487411499023, | |
| "rewards/margins": 119.50389099121094, | |
| "rewards/rejected": -96.11639404296875, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 0.8, | |
| "learning_rate": 9.63580099976196e-08, | |
| "logits/chosen": 2.1849985122680664, | |
| "logits/rejected": 3.4553771018981934, | |
| "logps/chosen": -387.8475341796875, | |
| "logps/rejected": -393.53192138671875, | |
| "loss": 0.2143, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 26.278972625732422, | |
| "rewards/margins": 115.37886810302734, | |
| "rewards/rejected": -89.09989929199219, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 0.8, | |
| "learning_rate": 9.623899071649607e-08, | |
| "logits/chosen": 3.139633893966675, | |
| "logits/rejected": 3.37255859375, | |
| "logps/chosen": -398.74005126953125, | |
| "logps/rejected": -418.0592346191406, | |
| "loss": 0.0799, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 22.002050399780273, | |
| "rewards/margins": 125.72208404541016, | |
| "rewards/rejected": -103.72003173828125, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 0.81, | |
| "learning_rate": 9.611997143537253e-08, | |
| "logits/chosen": 2.665964126586914, | |
| "logits/rejected": 2.8448374271392822, | |
| "logps/chosen": -427.25677490234375, | |
| "logps/rejected": -430.95025634765625, | |
| "loss": 0.3788, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 21.40536880493164, | |
| "rewards/margins": 111.3967056274414, | |
| "rewards/rejected": -89.9913330078125, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 0.82, | |
| "learning_rate": 9.600095215424899e-08, | |
| "logits/chosen": 2.7928450107574463, | |
| "logits/rejected": 3.595740795135498, | |
| "logps/chosen": -382.04327392578125, | |
| "logps/rejected": -431.33734130859375, | |
| "loss": 0.3577, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 16.352216720581055, | |
| "rewards/margins": 119.71681213378906, | |
| "rewards/rejected": -103.3646011352539, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 0.82, | |
| "learning_rate": 9.588193287312544e-08, | |
| "logits/chosen": 2.237015962600708, | |
| "logits/rejected": 2.9541878700256348, | |
| "logps/chosen": -421.40032958984375, | |
| "logps/rejected": -427.09649658203125, | |
| "loss": 0.0439, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 19.52680015563965, | |
| "rewards/margins": 119.0426254272461, | |
| "rewards/rejected": -99.51582336425781, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 0.83, | |
| "learning_rate": 9.57629135920019e-08, | |
| "logits/chosen": 2.776695728302002, | |
| "logits/rejected": 3.7990450859069824, | |
| "logps/chosen": -398.505859375, | |
| "logps/rejected": -432.1246032714844, | |
| "loss": 0.5895, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 19.98464012145996, | |
| "rewards/margins": 124.7027816772461, | |
| "rewards/rejected": -104.7181625366211, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 0.84, | |
| "learning_rate": 9.564389431087836e-08, | |
| "logits/chosen": 2.5397887229919434, | |
| "logits/rejected": 3.605865478515625, | |
| "logps/chosen": -403.42132568359375, | |
| "logps/rejected": -455.809814453125, | |
| "loss": 0.1571, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 29.50467872619629, | |
| "rewards/margins": 132.95152282714844, | |
| "rewards/rejected": -103.44685363769531, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.84, | |
| "eval_logits/chosen": 1.7718256711959839, | |
| "eval_logits/rejected": 2.484117031097412, | |
| "eval_logps/chosen": -400.06805419921875, | |
| "eval_logps/rejected": -421.99560546875, | |
| "eval_loss": 0.9890532493591309, | |
| "eval_rewards/accuracies": 0.9453125, | |
| "eval_rewards/chosen": 19.734703063964844, | |
| "eval_rewards/margins": 118.60000610351562, | |
| "eval_rewards/rejected": -98.86530303955078, | |
| "eval_runtime": 76.9799, | |
| "eval_samples_per_second": 12.99, | |
| "eval_steps_per_second": 0.416, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.84, | |
| "learning_rate": 9.552487502975481e-08, | |
| "logits/chosen": 2.1767947673797607, | |
| "logits/rejected": 3.4365925788879395, | |
| "logps/chosen": -440.6006774902344, | |
| "logps/rejected": -473.36700439453125, | |
| "loss": 0.3716, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 15.208317756652832, | |
| "rewards/margins": 130.98887634277344, | |
| "rewards/rejected": -115.78055572509766, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 0.85, | |
| "learning_rate": 9.540585574863127e-08, | |
| "logits/chosen": 2.517340898513794, | |
| "logits/rejected": 3.1486434936523438, | |
| "logps/chosen": -437.44854736328125, | |
| "logps/rejected": -439.794921875, | |
| "loss": 0.2643, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 2.325850009918213, | |
| "rewards/margins": 115.98299407958984, | |
| "rewards/rejected": -113.65716552734375, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 0.85, | |
| "learning_rate": 9.528683646750774e-08, | |
| "logits/chosen": 2.605825185775757, | |
| "logits/rejected": 3.62730073928833, | |
| "logps/chosen": -415.30926513671875, | |
| "logps/rejected": -434.9833068847656, | |
| "loss": 0.1454, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 12.735153198242188, | |
| "rewards/margins": 126.29862213134766, | |
| "rewards/rejected": -113.56346130371094, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 0.86, | |
| "learning_rate": 9.51678171863842e-08, | |
| "logits/chosen": 2.488985776901245, | |
| "logits/rejected": 3.484701156616211, | |
| "logps/chosen": -426.5611877441406, | |
| "logps/rejected": -440.25946044921875, | |
| "loss": 0.5792, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 8.252630233764648, | |
| "rewards/margins": 123.63459777832031, | |
| "rewards/rejected": -115.3819580078125, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 0.87, | |
| "learning_rate": 9.504879790526065e-08, | |
| "logits/chosen": 2.908905506134033, | |
| "logits/rejected": 3.2668673992156982, | |
| "logps/chosen": -379.87969970703125, | |
| "logps/rejected": -445.2496643066406, | |
| "loss": 0.3025, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 7.975564479827881, | |
| "rewards/margins": 144.19297790527344, | |
| "rewards/rejected": -136.21739196777344, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 0.87, | |
| "learning_rate": 9.49297786241371e-08, | |
| "logits/chosen": 2.915165424346924, | |
| "logits/rejected": 3.0565438270568848, | |
| "logps/chosen": -418.11669921875, | |
| "logps/rejected": -470.24554443359375, | |
| "loss": 0.1609, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 5.699041843414307, | |
| "rewards/margins": 146.64541625976562, | |
| "rewards/rejected": -140.94638061523438, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 0.88, | |
| "learning_rate": 9.481075934301356e-08, | |
| "logits/chosen": 2.8868582248687744, | |
| "logits/rejected": 3.6842854022979736, | |
| "logps/chosen": -373.21185302734375, | |
| "logps/rejected": -419.702392578125, | |
| "loss": 0.1811, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": -9.702957153320312, | |
| "rewards/margins": 123.20098876953125, | |
| "rewards/rejected": -132.90394592285156, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 0.89, | |
| "learning_rate": 9.469174006189002e-08, | |
| "logits/chosen": 2.415966749191284, | |
| "logits/rejected": 3.7415695190429688, | |
| "logps/chosen": -477.23406982421875, | |
| "logps/rejected": -459.58343505859375, | |
| "loss": 0.1843, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 7.450096130371094, | |
| "rewards/margins": 136.1929931640625, | |
| "rewards/rejected": -128.74290466308594, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 0.89, | |
| "learning_rate": 9.457272078076648e-08, | |
| "logits/chosen": 2.6312685012817383, | |
| "logits/rejected": 3.0948596000671387, | |
| "logps/chosen": -434.4181213378906, | |
| "logps/rejected": -447.225341796875, | |
| "loss": 0.3219, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 3.2366714477539062, | |
| "rewards/margins": 148.01669311523438, | |
| "rewards/rejected": -144.780029296875, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "learning_rate": 9.445370149964293e-08, | |
| "logits/chosen": 2.789755344390869, | |
| "logits/rejected": 3.598536729812622, | |
| "logps/chosen": -400.4031982421875, | |
| "logps/rejected": -415.2806091308594, | |
| "loss": 0.2459, | |
| "rewards/accuracies": 0.949999988079071, | |
| "rewards/chosen": 13.055984497070312, | |
| "rewards/margins": 139.63107299804688, | |
| "rewards/rejected": -126.5750961303711, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "eval_logits/chosen": 1.8071796894073486, | |
| "eval_logits/rejected": 2.5058929920196533, | |
| "eval_logps/chosen": -401.0693359375, | |
| "eval_logps/rejected": -428.1545715332031, | |
| "eval_loss": 1.071203589439392, | |
| "eval_rewards/accuracies": 0.9375, | |
| "eval_rewards/chosen": 14.728099822998047, | |
| "eval_rewards/margins": 144.3882293701172, | |
| "eval_rewards/rejected": -129.66012573242188, | |
| "eval_runtime": 76.8152, | |
| "eval_samples_per_second": 13.018, | |
| "eval_steps_per_second": 0.417, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.91, | |
| "learning_rate": 9.43346822185194e-08, | |
| "logits/chosen": 2.2163331508636475, | |
| "logits/rejected": 3.5338504314422607, | |
| "logps/chosen": -404.0025329589844, | |
| "logps/rejected": -437.22613525390625, | |
| "loss": 0.2835, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 19.502986907958984, | |
| "rewards/margins": 152.5010528564453, | |
| "rewards/rejected": -132.998046875, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 0.91, | |
| "learning_rate": 9.421566293739586e-08, | |
| "logits/chosen": 2.900045156478882, | |
| "logits/rejected": 3.8935494422912598, | |
| "logps/chosen": -371.714599609375, | |
| "logps/rejected": -410.403564453125, | |
| "loss": 0.1478, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": -17.428253173828125, | |
| "rewards/margins": 138.4210968017578, | |
| "rewards/rejected": -155.84933471679688, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "learning_rate": 9.409664365627231e-08, | |
| "logits/chosen": 2.534538984298706, | |
| "logits/rejected": 2.9124529361724854, | |
| "logps/chosen": -439.07183837890625, | |
| "logps/rejected": -433.35064697265625, | |
| "loss": 0.2963, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 5.632425308227539, | |
| "rewards/margins": 152.6386260986328, | |
| "rewards/rejected": -147.00619506835938, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 0.93, | |
| "learning_rate": 9.397762437514877e-08, | |
| "logits/chosen": 2.745168685913086, | |
| "logits/rejected": 3.2941577434539795, | |
| "logps/chosen": -414.8768005371094, | |
| "logps/rejected": -474.46258544921875, | |
| "loss": 0.1725, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": -17.173574447631836, | |
| "rewards/margins": 160.98873901367188, | |
| "rewards/rejected": -178.16232299804688, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 0.93, | |
| "learning_rate": 9.385860509402523e-08, | |
| "logits/chosen": 2.712263584136963, | |
| "logits/rejected": 3.576184034347534, | |
| "logps/chosen": -399.8708801269531, | |
| "logps/rejected": -430.77703857421875, | |
| "loss": 0.159, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": -4.633294582366943, | |
| "rewards/margins": 155.65737915039062, | |
| "rewards/rejected": -160.29067993164062, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "learning_rate": 9.373958581290168e-08, | |
| "logits/chosen": 2.4558093547821045, | |
| "logits/rejected": 3.536351442337036, | |
| "logps/chosen": -453.64324951171875, | |
| "logps/rejected": -463.8458557128906, | |
| "loss": 0.166, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 0.3881942629814148, | |
| "rewards/margins": 154.9169464111328, | |
| "rewards/rejected": -154.52874755859375, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "learning_rate": 9.362056653177814e-08, | |
| "logits/chosen": 2.787285566329956, | |
| "logits/rejected": 3.214555025100708, | |
| "logps/chosen": -444.3168029785156, | |
| "logps/rejected": -416.84820556640625, | |
| "loss": 0.3787, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": 8.255861282348633, | |
| "rewards/margins": 150.24636840820312, | |
| "rewards/rejected": -141.99050903320312, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 0.95, | |
| "learning_rate": 9.35015472506546e-08, | |
| "logits/chosen": 2.6908648014068604, | |
| "logits/rejected": 3.7777984142303467, | |
| "logps/chosen": -413.5292053222656, | |
| "logps/rejected": -414.0494079589844, | |
| "loss": 0.4045, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 13.289766311645508, | |
| "rewards/margins": 143.24343872070312, | |
| "rewards/rejected": -129.95367431640625, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "learning_rate": 9.338252796953105e-08, | |
| "logits/chosen": 2.289970636367798, | |
| "logits/rejected": 3.352263927459717, | |
| "logps/chosen": -403.9881591796875, | |
| "logps/rejected": -431.7518005371094, | |
| "loss": 0.378, | |
| "rewards/accuracies": 0.987500011920929, | |
| "rewards/chosen": 20.252124786376953, | |
| "rewards/margins": 172.3944549560547, | |
| "rewards/rejected": -152.142333984375, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "learning_rate": 9.326350868840752e-08, | |
| "logits/chosen": 3.019411563873291, | |
| "logits/rejected": 3.345460891723633, | |
| "logps/chosen": -434.126220703125, | |
| "logps/rejected": -452.2032165527344, | |
| "loss": 0.537, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 17.267078399658203, | |
| "rewards/margins": 171.45938110351562, | |
| "rewards/rejected": -154.19232177734375, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "eval_logits/chosen": 1.8465936183929443, | |
| "eval_logits/rejected": 2.4808287620544434, | |
| "eval_logps/chosen": -400.0705871582031, | |
| "eval_logps/rejected": -427.70770263671875, | |
| "eval_loss": 1.2171391248703003, | |
| "eval_rewards/accuracies": 0.9296875, | |
| "eval_rewards/chosen": 19.721826553344727, | |
| "eval_rewards/margins": 147.1477813720703, | |
| "eval_rewards/rejected": -127.42596435546875, | |
| "eval_runtime": 76.9797, | |
| "eval_samples_per_second": 12.99, | |
| "eval_steps_per_second": 0.416, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.97, | |
| "learning_rate": 9.314448940728398e-08, | |
| "logits/chosen": 2.212067127227783, | |
| "logits/rejected": 3.642113447189331, | |
| "logps/chosen": -408.46063232421875, | |
| "logps/rejected": -423.57464599609375, | |
| "loss": 0.1855, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": 38.055782318115234, | |
| "rewards/margins": 152.7517547607422, | |
| "rewards/rejected": -114.69598388671875, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 0.98, | |
| "learning_rate": 9.302547012616043e-08, | |
| "logits/chosen": 2.540785074234009, | |
| "logits/rejected": 3.4431614875793457, | |
| "logps/chosen": -421.13995361328125, | |
| "logps/rejected": -475.09716796875, | |
| "loss": 0.2447, | |
| "rewards/accuracies": 0.9624999761581421, | |
| "rewards/chosen": 10.023767471313477, | |
| "rewards/margins": 167.34268188476562, | |
| "rewards/rejected": -157.3189239501953, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 0.98, | |
| "learning_rate": 9.290645084503689e-08, | |
| "logits/chosen": 2.7672924995422363, | |
| "logits/rejected": 3.700913190841675, | |
| "logps/chosen": -417.6861877441406, | |
| "logps/rejected": -453.88818359375, | |
| "loss": 0.2154, | |
| "rewards/accuracies": 0.949999988079071, | |
| "rewards/chosen": 11.756277084350586, | |
| "rewards/margins": 158.40457153320312, | |
| "rewards/rejected": -146.64828491210938, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 0.99, | |
| "learning_rate": 9.278743156391336e-08, | |
| "logits/chosen": 2.3169944286346436, | |
| "logits/rejected": 3.3370776176452637, | |
| "logps/chosen": -397.16961669921875, | |
| "logps/rejected": -429.99554443359375, | |
| "loss": 0.5247, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": -8.517876625061035, | |
| "rewards/margins": 147.62835693359375, | |
| "rewards/rejected": -156.146240234375, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "learning_rate": 9.26684122827898e-08, | |
| "logits/chosen": 2.920701026916504, | |
| "logits/rejected": 3.607771396636963, | |
| "logps/chosen": -409.669189453125, | |
| "logps/rejected": -427.26617431640625, | |
| "loss": 0.6167, | |
| "rewards/accuracies": 0.9750000238418579, | |
| "rewards/chosen": -11.549623489379883, | |
| "rewards/margins": 140.46511840820312, | |
| "rewards/rejected": -152.01473999023438, | |
| "step": 1550 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 9336, | |
| "num_train_epochs": 6, | |
| "save_steps": 500, | |
| "total_flos": 0.0, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
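
The table above is the raw `log_history` of a trainer state file, so the structure is easiest to see by loading it programmatically. Below is a minimal sketch (not part of the original training artifacts) of how one might read such a file and summarise the evaluation entries; the filename `trainer_state.json` and the printed summary are assumptions for illustration, and only keys that appear in the log above are used.

```python
# Minimal sketch: inspect a trainer_state.json like the one above.
# Assumption: the file is saved as "trainer_state.json" in the current directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training records carry "loss"; periodic evaluation records carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Reward margin = rewards/chosen - rewards/rejected; a growing margin means the
# policy separates preferred completions from rejected ones more strongly.
for e in eval_logs:
    print(
        f"step {e['step']:>5}  "
        f"eval_loss={e['eval_loss']:.3f}  "
        f"acc={e['eval_rewards/accuracies']:.3f}  "
        f"margin={e['eval_rewards/margins']:.1f}"
    )

# Schedule metadata sits at the top level of the file.
print(
    "logged every", state["logging_steps"], "steps,",
    "saved every", state["save_steps"], "steps,",
    "out of", state["max_steps"], "total steps",
    f"({state['num_train_epochs']} epochs)",
)
```

Reading the eval rows this way makes the trend in this section easy to check: between steps 1000 and 1500 the reward margins and accuracies stay high while `eval_loss` fluctuates around 0.86-1.22.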