{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.13969267611255237,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "clip_ratio": 0.0,
      "completion_length": 6335.173723493303,
      "epoch": 0.0013969267611255239,
      "grad_norm": 0.1530565852782136,
      "learning_rate": 0.0,
      "loss": 0.0119,
      "num_tokens": 1330201.0,
      "reward": 0.3924093656241894,
      "reward_std": 0.32559600685323986,
      "rewards/reward_func": 0.3924093528517655,
      "step": 1
    },
    {
      "clip_ratio": 0.0,
      "epoch": 0.0027938535222510478,
      "grad_norm": 0.15303408357738274,
      "learning_rate": 1.3888888888888887e-08,
      "loss": 0.0119,
      "step": 2
    },
    {
      "clip_ratio": 0.010408175710056509,
      "epoch": 0.004190780283376571,
      "grad_norm": 0.1764633199425111,
      "learning_rate": 2.7777777777777774e-08,
      "loss": 0.0125,
      "step": 3
    },
    {
      "clip_ratio": 0.010680466823812042,
      "epoch": 0.0055877070445020955,
      "grad_norm": 0.2275277428050235,
      "learning_rate": 4.166666666666666e-08,
      "loss": 0.0126,
      "step": 4
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5737.668526785715,
      "epoch": 0.006984633805627619,
      "grad_norm": 0.19107039038527468,
      "learning_rate": 5.555555555555555e-08,
      "loss": 0.0293,
      "num_tokens": 2545372.0,
      "reward": 0.4294457712343761,
      "reward_std": 0.37008823986564365,
      "rewards/reward_func": 0.4294457531401089,
      "step": 5
    },
    {
      "clip_ratio": 0.010828850524766105,
      "epoch": 0.008381560566753142,
      "grad_norm": 0.2240354781456917,
      "learning_rate": 6.944444444444444e-08,
      "loss": 0.0301,
      "step": 6
    },
    {
      "clip_ratio": 0.010841753533376115,
      "epoch": 0.009778487327878668,
      "grad_norm": 0.2767916742737982,
      "learning_rate": 8.333333333333333e-08,
      "loss": 0.0301,
      "step": 7
    },
    {
      "clip_ratio": 0.010758176312914916,
      "epoch": 0.011175414089004191,
      "grad_norm": 0.2668705057271519,
      "learning_rate": 9.722222222222222e-08,
      "loss": 0.0301,
      "step": 8
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 6511.857439313616,
      "epoch": 0.012572340850129714,
      "grad_norm": 0.19266419886931196,
      "learning_rate": 1.111111111111111e-07,
      "loss": 0.0051,
      "num_tokens": 3911895.0,
      "reward": 0.32038674876093864,
      "reward_std": 0.3298808889729636,
      "rewards/reward_func": 0.32038673226322445,
      "step": 9
    },
    {
      "clip_ratio": 0.012134518141725234,
      "epoch": 0.013969267611255238,
      "grad_norm": 0.21591989333017872,
      "learning_rate": 1.25e-07,
      "loss": 0.0058,
      "step": 10
    },
    {
      "clip_ratio": 0.012228245115173715,
      "epoch": 0.015366194372380763,
      "grad_norm": 0.20676433229073893,
      "learning_rate": 1.3888888888888888e-07,
      "loss": 0.0058,
      "step": 11
    },
    {
      "clip_ratio": 0.012062976469418831,
      "epoch": 0.016763121133506285,
      "grad_norm": 0.2100849809088676,
      "learning_rate": 1.527777777777778e-07,
      "loss": 0.0058,
      "step": 12
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5776.398141043527,
      "epoch": 0.01816004789463181,
      "grad_norm": 0.21639349074679098,
      "learning_rate": 1.6666666666666665e-07,
      "loss": 0.0233,
      "num_tokens": 5132268.0,
      "reward": 0.4814984883580889,
      "reward_std": 0.3754643425345421,
      "rewards/reward_func": 0.48149845749139786,
      "step": 13
    },
    {
      "clip_ratio": 0.010207080947501319,
      "epoch": 0.019556974655757335,
      "grad_norm": 0.22611722201173162,
      "learning_rate": 1.8055555555555554e-07,
      "loss": 0.0241,
      "step": 14
    },
    {
      "clip_ratio": 0.010003214618856353,
      "epoch": 0.02095390141688286,
      "grad_norm": 0.21387408972131952,
      "learning_rate": 1.9444444444444445e-07,
      "loss": 0.024,
      "step": 15
    },
    {
      "clip_ratio": 0.010145484004169703,
      "epoch": 0.022350828178008382,
      "grad_norm": 0.2965931721160837,
      "learning_rate": 2.0833333333333333e-07,
      "loss": 0.024,
      "step": 16
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 6396.923740931919,
      "epoch": 0.023747754939133905,
      "grad_norm": 0.17812701051388663,
      "learning_rate": 2.222222222222222e-07,
      "loss": 0.0143,
      "num_tokens": 6474962.0,
      "reward": 0.4179248466555561,
      "reward_std": 0.3605238295027188,
      "rewards/reward_func": 0.41792482949261156,
      "step": 17
    },
    {
      "clip_ratio": 0.010267588176897593,
      "epoch": 0.02514468170025943,
      "grad_norm": 0.18587492539762027,
      "learning_rate": 2.361111111111111e-07,
      "loss": 0.015,
      "step": 18
    },
    {
      "clip_ratio": 0.010148471055020179,
      "epoch": 0.026541608461384952,
      "grad_norm": 0.1810944495339226,
      "learning_rate": 2.5e-07,
      "loss": 0.0149,
      "step": 19
    },
    {
      "clip_ratio": 0.01020466532957341,
      "epoch": 0.027938535222510476,
      "grad_norm": 0.1924193708866086,
      "learning_rate": 2.638888888888889e-07,
      "loss": 0.015,
      "step": 20
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5085.755362374442,
      "epoch": 0.029335461983636,
      "grad_norm": 0.3165497451368565,
      "learning_rate": 2.7777777777777776e-07,
      "loss": 0.0104,
      "num_tokens": 7560523.0,
      "reward": 0.464439152606896,
      "reward_std": 0.3689843277846064,
      "rewards/reward_func": 0.46443913131952286,
      "step": 21
    },
    {
      "clip_ratio": 0.010546388570219278,
      "epoch": 0.030732388744761526,
      "grad_norm": 0.3395774959095009,
      "learning_rate": 2.916666666666667e-07,
      "loss": 0.0113,
      "step": 22
    },
    {
      "clip_ratio": 0.010336563357019,
      "epoch": 0.032129315505887046,
      "grad_norm": 0.31582946500885495,
      "learning_rate": 3.055555555555556e-07,
      "loss": 0.0112,
      "step": 23
    },
    {
      "clip_ratio": 0.010099471979109305,
      "epoch": 0.03352624226701257,
      "grad_norm": 0.36247326697578075,
      "learning_rate": 3.194444444444444e-07,
      "loss": 0.0112,
      "step": 24
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 6461.342215401785,
      "epoch": 0.03492316902813809,
      "grad_norm": 0.15567466235487185,
      "learning_rate": 3.333333333333333e-07,
      "loss": 0.0193,
      "num_tokens": 8914764.0,
      "reward": 0.38339252876383917,
      "reward_std": 0.3311073939715113,
      "rewards/reward_func": 0.3833925181201526,
      "step": 25
    },
    {
      "clip_ratio": 0.009515652732391442,
      "epoch": 0.03632009578926362,
      "grad_norm": 0.25805173133838866,
      "learning_rate": 3.472222222222222e-07,
      "loss": 0.0199,
      "step": 26
    },
    {
      "clip_ratio": 0.009263422406677688,
      "epoch": 0.03771702255038915,
      "grad_norm": 0.4090162080972072,
      "learning_rate": 3.6111111111111107e-07,
      "loss": 0.0199,
      "step": 27
    },
    {
      "clip_ratio": 0.009094293162758862,
      "epoch": 0.03911394931151467,
      "grad_norm": 0.14691760483745145,
      "learning_rate": 3.75e-07,
      "loss": 0.0198,
      "step": 28
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5968.250279017857,
      "epoch": 0.040510876072640194,
      "grad_norm": 0.16406943154337036,
      "learning_rate": 3.888888888888889e-07,
      "loss": 0.0115,
      "num_tokens": 10173741.0,
      "reward": 0.3594382884246962,
      "reward_std": 0.32283720054796766,
      "rewards/reward_func": 0.35943826926606043,
      "step": 29
    },
    {
      "clip_ratio": 0.01090411888435483,
      "epoch": 0.04190780283376572,
      "grad_norm": 0.21927196182565817,
      "learning_rate": 4.027777777777778e-07,
      "loss": 0.0124,
      "step": 30
    },
    {
      "clip_ratio": 0.010733458446338773,
      "epoch": 0.04330472959489124,
      "grad_norm": 0.3447213393816689,
      "learning_rate": 4.1666666666666667e-07,
      "loss": 0.0121,
      "step": 31
    },
    {
      "clip_ratio": 0.010630057725523199,
      "epoch": 0.044701656356016764,
      "grad_norm": 0.3620568701207513,
      "learning_rate": 4.3055555555555555e-07,
      "loss": 0.0121,
      "step": 32
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5812.648210797991,
      "epoch": 0.04609858311714229,
      "grad_norm": 0.16987419224516487,
      "learning_rate": 4.444444444444444e-07,
      "loss": 0.0179,
      "num_tokens": 11403427.0,
      "reward": 0.3713971259338515,
      "reward_std": 0.3195132836699486,
      "rewards/reward_func": 0.3713971110326903,
      "step": 33
    },
    {
      "clip_ratio": 0.010423385953929807,
      "epoch": 0.04749550987826781,
      "grad_norm": 0.16329217057193554,
      "learning_rate": 4.5833333333333327e-07,
      "loss": 0.0185,
      "step": 34
    },
    {
      "clip_ratio": 0.010347368278806763,
      "epoch": 0.048892436639393334,
      "grad_norm": 0.15422959333602954,
      "learning_rate": 4.722222222222222e-07,
      "loss": 0.0185,
      "step": 35
    },
    {
      "clip_ratio": 0.010140369646251202,
      "epoch": 0.05028936340051886,
      "grad_norm": 0.14689712083262277,
      "learning_rate": 4.861111111111111e-07,
      "loss": 0.0184,
      "step": 36
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5546.617606026785,
      "epoch": 0.05168629016164438,
      "grad_norm": 0.23439115342799705,
      "learning_rate": 5e-07,
      "loss": 0.0161,
      "num_tokens": 12578819.0,
      "reward": 0.4297656629766737,
      "reward_std": 0.357943703021322,
      "rewards/reward_func": 0.42976564009274754,
      "step": 37
    },
    {
      "clip_ratio": 0.010506943932601384,
      "epoch": 0.053083216922769905,
      "grad_norm": 0.28362735010406454,
      "learning_rate": 5.138888888888889e-07,
      "loss": 0.0168,
      "step": 38
    },
    {
      "clip_ratio": 0.010338681018246072,
      "epoch": 0.05448014368389543,
      "grad_norm": 0.22707404637725537,
      "learning_rate": 5.277777777777777e-07,
      "loss": 0.0167,
      "step": 39
    },
    {
      "clip_ratio": 0.010024449788033962,
      "epoch": 0.05587707044502095,
      "grad_norm": 0.17811231341381648,
      "learning_rate": 5.416666666666666e-07,
      "loss": 0.0165,
      "step": 40
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 6501.444231305803,
      "epoch": 0.057273997206146475,
      "grad_norm": 0.19673275542337104,
      "learning_rate": 5.555555555555555e-07,
      "loss": 0.0203,
      "num_tokens": 13942392.0,
      "reward": 0.39034509126629147,
      "reward_std": 0.3664030634931156,
      "rewards/reward_func": 0.3903450625283377,
      "step": 41
    },
    {
      "clip_ratio": 0.010402361529746227,
      "epoch": 0.058670923967272,
      "grad_norm": 0.46687244963414065,
      "learning_rate": 5.694444444444444e-07,
      "loss": 0.0211,
      "step": 42
    },
    {
      "clip_ratio": 0.010496154893189669,
      "epoch": 0.06006785072839753,
      "grad_norm": 0.19428194405187207,
      "learning_rate": 5.833333333333334e-07,
      "loss": 0.0209,
      "step": 43
    },
    {
      "clip_ratio": 0.010158670766811286,
      "epoch": 0.06146477748952305,
      "grad_norm": 0.15306555166745645,
      "learning_rate": 5.972222222222222e-07,
      "loss": 0.0208,
      "step": 44
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 6369.643048967634,
      "epoch": 0.06286170425064858,
      "grad_norm": 0.1871015916141642,
      "learning_rate": 6.111111111111112e-07,
      "loss": 0.0161,
      "num_tokens": 15279078.0,
      "reward": 0.36025070399045944,
      "reward_std": 0.35143060450042996,
      "rewards/reward_func": 0.360250677381243,
      "step": 45
    },
    {
      "clip_ratio": 0.01079715442444597,
      "epoch": 0.06425863101177409,
      "grad_norm": 0.6294866160536174,
      "learning_rate": 6.249999999999999e-07,
      "loss": 0.0169,
      "step": 46
    },
    {
      "clip_ratio": 0.010736021545848675,
      "epoch": 0.06565555777289962,
      "grad_norm": 2.137657977104326,
      "learning_rate": 6.388888888888888e-07,
      "loss": 0.0171,
      "step": 47
    },
    {
      "clip_ratio": 0.010864090407267213,
      "epoch": 0.06705248453402514,
      "grad_norm": 0.18428434234054034,
      "learning_rate": 6.527777777777777e-07,
      "loss": 0.0166,
      "step": 48
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 6655.908412388393,
      "epoch": 0.06844941129515067,
      "grad_norm": 0.16953542603782387,
      "learning_rate": 6.666666666666666e-07,
      "loss": 0.0148,
      "num_tokens": 16672949.0,
      "reward": 0.35046685327376637,
      "reward_std": 0.30536604832325664,
      "rewards/reward_func": 0.3504668373082365,
      "step": 49
    },
    {
      "clip_ratio": 0.010905002310339893,
      "epoch": 0.06984633805627619,
      "grad_norm": 1.9607105946099048,
      "learning_rate": 6.805555555555556e-07,
      "loss": 0.0153,
      "step": 50
    },
    {
      "clip_ratio": 0.011137549964977162,
      "epoch": 0.07124326481740172,
      "grad_norm": 0.16619610980435887,
      "learning_rate": 6.944444444444444e-07,
      "loss": 0.0153,
      "step": 51
    },
    {
      "clip_ratio": 0.011474365035870246,
      "epoch": 0.07264019157852725,
      "grad_norm": 0.13731779782413311,
      "learning_rate": 7.083333333333334e-07,
      "loss": 0.0151,
      "step": 52
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 6259.326799665178,
      "epoch": 0.07403711833965276,
      "grad_norm": 0.18005417578145988,
      "learning_rate": 7.222222222222221e-07,
      "loss": 0.0124,
      "num_tokens": 17987081.0,
      "reward": 0.3450017161667347,
      "reward_std": 0.3385175902928625,
      "rewards/reward_func": 0.34500169487936155,
      "step": 53
    },
    {
      "clip_ratio": 0.012061000069869416,
      "epoch": 0.0754340451007783,
      "grad_norm": 0.3653574862365132,
      "learning_rate": 7.361111111111111e-07,
      "loss": 0.0132,
      "step": 54
    },
    {
      "clip_ratio": 0.01210923127031752,
      "epoch": 0.07683097186190381,
      "grad_norm": 0.1572526876806962,
      "learning_rate": 7.5e-07,
      "loss": 0.013,
      "step": 55
    },
    {
      "clip_ratio": 0.012545851857534476,
      "epoch": 0.07822789862302934,
      "grad_norm": 0.1348151811096135,
      "learning_rate": 7.638888888888888e-07,
      "loss": 0.0128,
      "step": 56
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 6945.081944056919,
      "epoch": 0.07962482538415486,
      "grad_norm": 0.1355243529384343,
      "learning_rate": 7.777777777777778e-07,
      "loss": 0.0203,
      "num_tokens": 19437527.0,
      "reward": 0.2853341962077788,
      "reward_std": 0.28026825402464184,
      "rewards/reward_func": 0.28533417971006464,
      "step": 57
    },
    {
      "clip_ratio": 0.010294904433456915,
      "epoch": 0.08102175214528039,
      "grad_norm": 1.4215854683138216,
      "learning_rate": 7.916666666666666e-07,
      "loss": 0.0211,
      "step": 58
    },
    {
      "clip_ratio": 0.010296880932790893,
      "epoch": 0.0824186789064059,
      "grad_norm": 0.1328390444888187,
      "learning_rate": 8.055555555555556e-07,
      "loss": 0.0208,
      "step": 59
    },
    {
      "clip_ratio": 0.010402131692639418,
      "epoch": 0.08381560566753143,
      "grad_norm": 0.10630427559017747,
      "learning_rate": 8.194444444444443e-07,
      "loss": 0.0207,
      "step": 60
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5832.984967912947,
      "epoch": 0.08521253242865695,
      "grad_norm": 0.2568979639198368,
      "learning_rate": 8.333333333333333e-07,
      "loss": 0.0135,
      "num_tokens": 20669861.0,
      "reward": 0.37696379157049315,
      "reward_std": 0.3524509519338608,
      "rewards/reward_func": 0.37696376868656706,
      "step": 61
    },
    {
      "clip_ratio": 0.011475847900978156,
      "epoch": 0.08660945918978248,
      "grad_norm": 0.26568198717380825,
      "learning_rate": 8.472222222222222e-07,
      "loss": 0.0142,
      "step": 62
    },
    {
      "clip_ratio": 0.011013164584125792,
      "epoch": 0.088006385950908,
      "grad_norm": 0.5160311893436309,
      "learning_rate": 8.611111111111111e-07,
      "loss": 0.014,
      "step": 63
    },
    {
      "clip_ratio": 0.010899153238694583,
      "epoch": 0.08940331271203353,
      "grad_norm": 0.16319985648001487,
      "learning_rate": 8.75e-07,
      "loss": 0.0138,
      "step": 64
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5130.444091796875,
      "epoch": 0.09080023947315904,
      "grad_norm": 0.28767641683692746,
      "learning_rate": 8.888888888888888e-07,
      "loss": 0.0025,
      "num_tokens": 21764127.0,
      "reward": 0.46981193338121685,
      "reward_std": 0.3410501836666039,
      "rewards/reward_func": 0.4698119152869497,
      "step": 65
    },
    {
      "clip_ratio": 0.011167634411581926,
      "epoch": 0.09219716623428457,
      "grad_norm": 1786685.0973493713,
      "learning_rate": 9.027777777777778e-07,
      "loss": 31.3025,
      "step": 66
    },
    {
      "clip_ratio": 0.011178254083331143,
      "epoch": 0.09359409299541009,
      "grad_norm": 0.2699319084211939,
      "learning_rate": 9.166666666666665e-07,
      "loss": 0.0029,
      "step": 67
    },
    {
      "clip_ratio": 0.010996093108717884,
      "epoch": 0.09499101975653562,
      "grad_norm": 0.1796173018225043,
      "learning_rate": 9.305555555555555e-07,
      "loss": 0.0026,
      "step": 68
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5374.097150530134,
      "epoch": 0.09638794651766115,
      "grad_norm": 0.29248413368918635,
      "learning_rate": 9.444444444444444e-07,
      "loss": 0.0204,
      "num_tokens": 22905462.0,
      "reward": 0.42480308030332836,
      "reward_std": 0.3633535067949976,
      "rewards/reward_func": 0.4248030515653746,
      "step": 69
    },
    {
      "clip_ratio": 0.010870669968426228,
      "epoch": 0.09778487327878667,
      "grad_norm": 0.2916766016433164,
      "learning_rate": 9.583333333333334e-07,
      "loss": 0.0211,
      "step": 70
    },
    {
      "clip_ratio": 0.010642989072948694,
      "epoch": 0.0991818000399122,
      "grad_norm": 0.2574669164587784,
      "learning_rate": 9.722222222222222e-07,
      "loss": 0.0209,
      "step": 71
    },
    {
      "clip_ratio": 0.010603911941871047,
      "epoch": 0.10057872680103772,
      "grad_norm": 0.3923293286285201,
      "learning_rate": 9.861111111111112e-07,
      "loss": 0.0206,
      "step": 72
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 4780.178763253348,
      "epoch": 0.10197565356216325,
      "grad_norm": 0.3357433852104651,
      "learning_rate": 1e-06,
      "loss": 0.0111,
      "num_tokens": 23928872.0,
      "reward": 0.49998574065310614,
      "reward_std": 0.3773101898176329,
      "rewards/reward_func": 0.4999857119151524,
      "step": 73
    },
    {
      "clip_ratio": 0.012297127628698945,
      "epoch": 0.10337258032328876,
      "grad_norm": 0.24331462448033314,
      "learning_rate": 1e-06,
      "loss": 0.0117,
      "step": 74
    },
    {
      "clip_ratio": 0.012708199177203434,
      "epoch": 0.10476950708441429,
      "grad_norm": 0.6107245136629853,
      "learning_rate": 1e-06,
      "loss": 0.0114,
      "step": 75
    },
    {
      "clip_ratio": 0.013628917180800013,
      "epoch": 0.10616643384553981,
      "grad_norm": 0.1571665107730879,
      "learning_rate": 1e-06,
      "loss": 0.0112,
      "step": 76
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5568.459463936942,
      "epoch": 0.10756336060666534,
      "grad_norm": 0.3303048447870775,
      "learning_rate": 1e-06,
      "loss": 0.0085,
      "num_tokens": 25108534.0,
      "reward": 0.3940093676958765,
      "reward_std": 0.32062976009079386,
      "rewards/reward_func": 0.39400935066597803,
      "step": 77
    },
    {
      "clip_ratio": 0.012926211846726281,
      "epoch": 0.10896028736779086,
      "grad_norm": 0.4586726422820888,
      "learning_rate": 1e-06,
      "loss": 0.0092,
      "step": 78
    },
    {
      "clip_ratio": 0.013429554013003196,
      "epoch": 0.11035721412891639,
      "grad_norm": 0.46850871504202196,
      "learning_rate": 1e-06,
      "loss": 0.0089,
      "step": 79
    },
    {
      "clip_ratio": 0.01421830172850085,
      "epoch": 0.1117541408900419,
      "grad_norm": 0.2694884119514331,
      "learning_rate": 1e-06,
      "loss": 0.0087,
      "step": 80
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5048.066615513393,
      "epoch": 0.11315106765116743,
      "grad_norm": 0.4818094588291497,
      "learning_rate": 1e-06,
      "loss": 0.0003,
      "num_tokens": 26188824.0,
      "reward": 0.4587076764021601,
      "reward_std": 0.3670096014227186,
      "rewards/reward_func": 0.45870765511478695,
      "step": 81
    },
    {
      "clip_ratio": 0.014440144545265607,
      "epoch": 0.11454799441229295,
      "grad_norm": 39.00637105645605,
      "learning_rate": 1e-06,
      "loss": 0.0041,
      "step": 82
    },
    {
      "clip_ratio": 0.014427063853612967,
      "epoch": 0.11594492117341848,
      "grad_norm": 0.9150065270465777,
      "learning_rate": 1e-06,
      "loss": 0.0009,
      "step": 83
    },
    {
      "clip_ratio": 0.015046841731028897,
      "epoch": 0.117341847934544,
      "grad_norm": 0.24009263771304865,
      "learning_rate": 1e-06,
      "loss": 0.0007,
      "step": 84
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 4776.74507359096,
      "epoch": 0.11873877469566953,
      "grad_norm": 0.4772391378994009,
      "learning_rate": 1e-06,
      "loss": 0.0175,
      "num_tokens": 27213245.0,
      "reward": 0.46243097526686533,
      "reward_std": 0.3514525145292282,
      "rewards/reward_func": 0.4624309518507549,
      "step": 85
    },
    {
      "clip_ratio": 0.013508185378408857,
      "epoch": 0.12013570145679506,
      "grad_norm": 166639.4843736821,
      "learning_rate": 1e-06,
      "loss": 7.3176,
      "step": 86
    },
    {
      "clip_ratio": 0.013775874627754092,
      "epoch": 0.12153262821792057,
      "grad_norm": 27.738301939542676,
      "learning_rate": 1e-06,
      "loss": 0.019,
      "step": 87
    },
    {
      "clip_ratio": 0.014050438667514495,
      "epoch": 0.1229295549790461,
      "grad_norm": 2.3910898137109857,
      "learning_rate": 1e-06,
      "loss": 0.0182,
      "step": 88
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 4914.035984584263,
      "epoch": 0.12432648174017162,
      "grad_norm": 0.5313282819608155,
      "learning_rate": 1e-06,
      "loss": 0.0136,
      "num_tokens": 28265422.0,
      "reward": 0.493380460355963,
      "reward_std": 0.39978101530245375,
      "rewards/reward_func": 0.4933804316180093,
      "step": 89
    },
    {
      "clip_ratio": 0.017403810085462674,
      "epoch": 0.12572340850129715,
      "grad_norm": 2.5053609302596263,
      "learning_rate": 1e-06,
      "loss": 0.0153,
      "step": 90
    },
    {
      "clip_ratio": 0.018204916534679278,
      "epoch": 0.12712033526242267,
      "grad_norm": 1.3673142623484746,
      "learning_rate": 1e-06,
      "loss": 0.0147,
      "step": 91
    },
    {
      "clip_ratio": 0.019155576026865413,
      "epoch": 0.12851726202354818,
      "grad_norm": 0.285245341653243,
      "learning_rate": 1e-06,
      "loss": 0.0145,
      "step": 92
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 4607.285940987723,
      "epoch": 0.12991418878467373,
      "grad_norm": 0.5382478014891808,
      "learning_rate": 1e-06,
      "loss": 0.0251,
      "num_tokens": 29256889.0,
      "reward": 0.4975069910287857,
      "reward_std": 0.39376414673668997,
      "rewards/reward_func": 0.4975069612264633,
      "step": 93
    },
    {
      "clip_ratio": 0.017131159414670298,
      "epoch": 0.13131111554579925,
      "grad_norm": 277.06893594236533,
      "learning_rate": 1e-06,
      "loss": 0.041,
      "step": 94
    },
    {
      "clip_ratio": 0.017470890456544503,
      "epoch": 0.13270804230692476,
      "grad_norm": 0.595127984757126,
      "learning_rate": 1e-06,
      "loss": 0.0264,
      "step": 95
    },
    {
      "clip_ratio": 0.017918821956430162,
      "epoch": 0.13410496906805028,
      "grad_norm": 0.3727932311400719,
      "learning_rate": 1e-06,
      "loss": 0.026,
      "step": 96
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 5105.030866350447,
      "epoch": 0.13550189582917582,
      "grad_norm": 0.5204808748332284,
      "learning_rate": 1e-06,
      "loss": 0.0166,
      "num_tokens": 30346587.0,
      "reward": 0.40901294563497814,
      "reward_std": 0.3415069899388722,
      "rewards/reward_func": 0.4090129222188677,
      "step": 97
    },
    {
      "clip_ratio": 0.0174449899766062,
      "epoch": 0.13689882259030134,
      "grad_norm": 1.1528181627310667,
      "learning_rate": 1e-06,
      "loss": 0.0183,
      "step": 98
    },
    {
      "clip_ratio": 0.01785454393497535,
      "epoch": 0.13829574935142686,
      "grad_norm": 0.6193098655808096,
      "learning_rate": 1e-06,
      "loss": 0.0177,
      "step": 99
    },
    {
      "clip_ratio": 0.018185677073363746,
      "epoch": 0.13969267611255237,
      "grad_norm": 0.29035255969562146,
      "learning_rate": 1e-06,
      "loss": 0.0174,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 715,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}