{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.602150537634408,
  "eval_steps": 500,
  "global_step": 230,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.043010752688172046, "grad_norm": 11.91717546170225, "learning_rate": 0.0, "loss": 0.7999, "mean_token_accuracy": 0.7625279724597931, "num_tokens": 367812.0, "step": 1 },
    { "epoch": 0.08602150537634409, "grad_norm": 9.023052509105142, "learning_rate": 8.695652173913044e-07, "loss": 0.8777, "mean_token_accuracy": 0.7521283626556396, "num_tokens": 746486.0, "step": 2 },
    { "epoch": 0.12903225806451613, "grad_norm": 8.573471835574493, "learning_rate": 1.7391304347826088e-06, "loss": 0.8113, "mean_token_accuracy": 0.749506875872612, "num_tokens": 1126566.0, "step": 3 },
    { "epoch": 0.17204301075268819, "grad_norm": 5.577877324067109, "learning_rate": 2.6086956521739132e-06, "loss": 0.7698, "mean_token_accuracy": 0.7678339183330536, "num_tokens": 1507062.0, "step": 4 },
    { "epoch": 0.21505376344086022, "grad_norm": 4.129947299735385, "learning_rate": 3.4782608695652175e-06, "loss": 0.7601, "mean_token_accuracy": 0.7700327038764954, "num_tokens": 1881779.0, "step": 5 },
    { "epoch": 0.25806451612903225, "grad_norm": 3.4130681291525984, "learning_rate": 4.347826086956522e-06, "loss": 0.7044, "mean_token_accuracy": 0.7722686380147934, "num_tokens": 2265581.0, "step": 6 },
    { "epoch": 0.3010752688172043, "grad_norm": 2.9935342196668655, "learning_rate": 5.2173913043478265e-06, "loss": 0.66, "mean_token_accuracy": 0.7871796786785126, "num_tokens": 2646672.0, "step": 7 },
    { "epoch": 0.34408602150537637, "grad_norm": 2.4639857428718863, "learning_rate": 6.086956521739132e-06, "loss": 0.6528, "mean_token_accuracy": 0.7861378192901611, "num_tokens": 3024415.0, "step": 8 },
    { "epoch": 0.3870967741935484, "grad_norm": 2.1893799102976637, "learning_rate": 6.956521739130435e-06, "loss": 0.6118, "mean_token_accuracy": 0.7930850088596344, "num_tokens": 3400301.0, "step": 9 },
    { "epoch": 0.43010752688172044, "grad_norm": 2.186514396506674, "learning_rate": 7.82608695652174e-06, "loss": 0.589, "mean_token_accuracy": 0.798459991812706, "num_tokens": 3782217.0, "step": 10 },
    { "epoch": 0.4731182795698925, "grad_norm": 3.0465126347830602, "learning_rate": 8.695652173913044e-06, "loss": 0.6752, "mean_token_accuracy": 0.7804096639156342, "num_tokens": 4150420.0, "step": 11 },
    { "epoch": 0.5161290322580645, "grad_norm": 2.6537477460552275, "learning_rate": 9.565217391304349e-06, "loss": 0.6027, "mean_token_accuracy": 0.7908397912979126, "num_tokens": 4524870.0, "step": 12 },
    { "epoch": 0.5591397849462365, "grad_norm": 2.396118650881142, "learning_rate": 1.0434782608695653e-05, "loss": 0.5859, "mean_token_accuracy": 0.792891800403595, "num_tokens": 4916036.0, "step": 13 },
    { "epoch": 0.6021505376344086, "grad_norm": 2.2226168829192114, "learning_rate": 1.1304347826086957e-05, "loss": 0.5514, "mean_token_accuracy": 0.8134036660194397, "num_tokens": 5284361.0, "step": 14 },
    { "epoch": 0.6451612903225806, "grad_norm": 1.8673665641432782, "learning_rate": 1.2173913043478263e-05, "loss": 0.5486, "mean_token_accuracy": 0.8076689690351486, "num_tokens": 5648932.0, "step": 15 },
    { "epoch": 0.6881720430107527, "grad_norm": 1.8442596020009865, "learning_rate": 1.3043478260869566e-05, "loss": 0.5263, "mean_token_accuracy": 0.8252855390310287, "num_tokens": 6013624.0, "step": 16 },
    { "epoch": 0.7311827956989247, "grad_norm": 1.7829423593903526, "learning_rate": 1.391304347826087e-05, "loss": 0.5096, "mean_token_accuracy": 0.8202553987503052, "num_tokens": 6392059.0, "step": 17 },
    { "epoch": 0.7741935483870968, "grad_norm": 1.8090352579295244, "learning_rate": 1.4782608695652174e-05, "loss": 0.521, "mean_token_accuracy": 0.8225749135017395, "num_tokens": 6790097.0, "step": 18 },
    { "epoch": 0.8172043010752689, "grad_norm": 1.75919933696252, "learning_rate": 1.565217391304348e-05, "loss": 0.4948, "mean_token_accuracy": 0.828051283955574, "num_tokens": 7160612.0, "step": 19 },
    { "epoch": 0.8602150537634409, "grad_norm": 1.9385645040102704, "learning_rate": 1.6521739130434785e-05, "loss": 0.5251, "mean_token_accuracy": 0.8155874758958817, "num_tokens": 7527524.0, "step": 20 },
    { "epoch": 0.9032258064516129, "grad_norm": 1.8027184161622232, "learning_rate": 1.739130434782609e-05, "loss": 0.5057, "mean_token_accuracy": 0.8297092467546463, "num_tokens": 7888136.0, "step": 21 },
    { "epoch": 0.946236559139785, "grad_norm": 2.1820648482478187, "learning_rate": 1.8260869565217393e-05, "loss": 0.5036, "mean_token_accuracy": 0.8194401413202286, "num_tokens": 8267314.0, "step": 22 },
    { "epoch": 0.989247311827957, "grad_norm": 1.9518940278276342, "learning_rate": 1.9130434782608697e-05, "loss": 0.4965, "mean_token_accuracy": 0.8308457583189011, "num_tokens": 8644626.0, "step": 23 },
    { "epoch": 1.0, "grad_norm": 1.9518940278276342, "learning_rate": 2e-05, "loss": 0.0981, "mean_token_accuracy": 0.8572691082954407, "num_tokens": 8735951.0, "step": 24 },
    { "epoch": 1.043010752688172, "grad_norm": 1.658616504963653, "learning_rate": 1.999884834944106e-05, "loss": 0.3613, "mean_token_accuracy": 0.8678812235593796, "num_tokens": 9111064.0, "step": 25 },
    { "epoch": 1.086021505376344, "grad_norm": 1.6457610214496745, "learning_rate": 1.9995393663024054e-05, "loss": 0.3551, "mean_token_accuracy": 0.8692608177661896, "num_tokens": 9496130.0, "step": 26 },
    { "epoch": 1.129032258064516, "grad_norm": 3.5904835403286754, "learning_rate": 1.9989636736467278e-05, "loss": 0.3695, "mean_token_accuracy": 0.866924524307251, "num_tokens": 9858972.0, "step": 27 },
    { "epoch": 1.1720430107526882, "grad_norm": 1.7874385949477343, "learning_rate": 1.9981578895764272e-05, "loss": 0.3212, "mean_token_accuracy": 0.890799954533577, "num_tokens": 10231593.0, "step": 28 },
    { "epoch": 1.2150537634408602, "grad_norm": 1.8049361403495439, "learning_rate": 1.9971221996878395e-05, "loss": 0.3554, "mean_token_accuracy": 0.8701900094747543, "num_tokens": 10601477.0, "step": 29 },
    { "epoch": 1.2580645161290323, "grad_norm": 1.3711396350791194, "learning_rate": 1.9958568425315316e-05, "loss": 0.309, "mean_token_accuracy": 0.8979966044425964, "num_tokens": 10971400.0, "step": 30 },
    { "epoch": 1.3010752688172043, "grad_norm": 1.5009618715368216, "learning_rate": 1.9943621095573588e-05, "loss": 0.3301, "mean_token_accuracy": 0.8754479885101318, "num_tokens": 11338917.0, "step": 31 },
    { "epoch": 1.3440860215053765, "grad_norm": 1.4417013210198348, "learning_rate": 1.9926383450473344e-05, "loss": 0.2909, "mean_token_accuracy": 0.8934924155473709, "num_tokens": 11708711.0, "step": 32 },
    { "epoch": 1.3870967741935485, "grad_norm": 1.3285539132923128, "learning_rate": 1.9906859460363307e-05, "loss": 0.3018, "mean_token_accuracy": 0.8906232416629791, "num_tokens": 12088883.0, "step": 33 },
    { "epoch": 1.4301075268817205, "grad_norm": 1.5218190255666082, "learning_rate": 1.9885053622206305e-05, "loss": 0.3363, "mean_token_accuracy": 0.8796575218439102, "num_tokens": 12468165.0, "step": 34 },
    { "epoch": 1.4731182795698925, "grad_norm": 1.3816402287843272, "learning_rate": 1.986097095854347e-05, "loss": 0.3157, "mean_token_accuracy": 0.8860495537519455, "num_tokens": 12841352.0, "step": 35 },
    { "epoch": 1.5161290322580645, "grad_norm": 1.442754299402919, "learning_rate": 1.9834617016337424e-05, "loss": 0.3211, "mean_token_accuracy": 0.8844448328018188, "num_tokens": 13227719.0, "step": 36 },
    { "epoch": 1.5591397849462365, "grad_norm": 1.4155340183010523, "learning_rate": 1.9805997865694616e-05, "loss": 0.3453, "mean_token_accuracy": 0.8787361979484558, "num_tokens": 13607472.0, "step": 37 },
    { "epoch": 1.6021505376344085, "grad_norm": 1.1437615335312725, "learning_rate": 1.9775120098467212e-05, "loss": 0.2762, "mean_token_accuracy": 0.9029232263565063, "num_tokens": 13982708.0, "step": 38 },
    { "epoch": 1.6451612903225805, "grad_norm": 1.3406964229739868, "learning_rate": 1.9741990826734793e-05, "loss": 0.2998, "mean_token_accuracy": 0.8943506330251694, "num_tokens": 14364424.0, "step": 39 },
    { "epoch": 1.6881720430107527, "grad_norm": 1.25822076002706, "learning_rate": 1.970661768116622e-05, "loss": 0.2537, "mean_token_accuracy": 0.9153124392032623, "num_tokens": 14755621.0, "step": 40 },
    { "epoch": 1.7311827956989247, "grad_norm": 1.1260474975551218, "learning_rate": 1.9669008809262064e-05, "loss": 0.2936, "mean_token_accuracy": 0.8934088349342346, "num_tokens": 15127506.0, "step": 41 },
    { "epoch": 1.7741935483870968, "grad_norm": 1.1356922171937402, "learning_rate": 1.9629172873477995e-05, "loss": 0.2639, "mean_token_accuracy": 0.9112239480018616, "num_tokens": 15503327.0, "step": 42 },
    { "epoch": 1.817204301075269, "grad_norm": 1.1298046162246171, "learning_rate": 1.9587119049229558e-05, "loss": 0.29, "mean_token_accuracy": 0.8936085551977158, "num_tokens": 15880530.0, "step": 43 },
    { "epoch": 1.860215053763441, "grad_norm": 1.1389639144335877, "learning_rate": 1.954285702277879e-05, "loss": 0.2934, "mean_token_accuracy": 0.8923312425613403, "num_tokens": 16263155.0, "step": 44 },
    { "epoch": 1.903225806451613, "grad_norm": 1.0066342748134163, "learning_rate": 1.9496396989003195e-05, "loss": 0.2798, "mean_token_accuracy": 0.8991601765155792, "num_tokens": 16634276.0, "step": 45 },
    { "epoch": 1.946236559139785, "grad_norm": 1.2385856620277536, "learning_rate": 1.944774964904754e-05, "loss": 0.3036, "mean_token_accuracy": 0.8929028362035751, "num_tokens": 17013425.0, "step": 46 },
    { "epoch": 1.989247311827957, "grad_norm": 1.0574616204979852, "learning_rate": 1.9396926207859085e-05, "loss": 0.2916, "mean_token_accuracy": 0.8939765691757202, "num_tokens": 17380847.0, "step": 47 },
    { "epoch": 2.0, "grad_norm": 1.0574616204979852, "learning_rate": 1.9343938371606714e-05, "loss": 0.0465, "mean_token_accuracy": 0.9277108311653137, "num_tokens": 17474187.0, "step": 48 },
    { "epoch": 2.043010752688172, "grad_norm": 1.1539619815555024, "learning_rate": 1.9288798344984673e-05, "loss": 0.1697, "mean_token_accuracy": 0.9405381679534912, "num_tokens": 17848143.0, "step": 49 },
    { "epoch": 2.086021505376344, "grad_norm": 0.9946315524935219, "learning_rate": 1.9231518828401458e-05, "loss": 0.1587, "mean_token_accuracy": 0.946861207485199, "num_tokens": 18231846.0, "step": 50 },
    { "epoch": 2.129032258064516, "grad_norm": 1.0205876962667297, "learning_rate": 1.917211301505453e-05, "loss": 0.1631, "mean_token_accuracy": 0.9437925517559052, "num_tokens": 18629481.0, "step": 51 },
    { "epoch": 2.172043010752688, "grad_norm": 1.2180236071671326, "learning_rate": 1.911059458789152e-05, "loss": 0.1596, "mean_token_accuracy": 0.9489908963441849, "num_tokens": 19012798.0, "step": 52 },
    { "epoch": 2.21505376344086, "grad_norm": 1.1932774403236657, "learning_rate": 1.9046977716458627e-05, "loss": 0.1524, "mean_token_accuracy": 0.9486334323883057, "num_tokens": 19397188.0, "step": 53 },
    { "epoch": 2.258064516129032, "grad_norm": 1.1190279919994257, "learning_rate": 1.8981277053636963e-05, "loss": 0.1767, "mean_token_accuracy": 0.9350408464670181, "num_tokens": 19779411.0, "step": 54 },
    { "epoch": 2.3010752688172045, "grad_norm": 1.0935887572069407, "learning_rate": 1.891350773226754e-05, "loss": 0.1575, "mean_token_accuracy": 0.9455375522375107, "num_tokens": 20148217.0, "step": 55 },
    { "epoch": 2.3440860215053765, "grad_norm": 0.8268906453812284, "learning_rate": 1.8843685361665724e-05, "loss": 0.1495, "mean_token_accuracy": 0.9489219486713409, "num_tokens": 20517539.0, "step": 56 },
    { "epoch": 2.3870967741935485, "grad_norm": 0.8463600848872256, "learning_rate": 1.8771826024025944e-05, "loss": 0.1625, "mean_token_accuracy": 0.9414703994989395, "num_tokens": 20886139.0, "step": 57 },
    { "epoch": 2.4301075268817205, "grad_norm": 0.8514202559815353, "learning_rate": 1.8697946270717468e-05, "loss": 0.1444, "mean_token_accuracy": 0.9490186870098114, "num_tokens": 21255999.0, "step": 58 },
    { "epoch": 2.4731182795698925, "grad_norm": 0.8543888251991751, "learning_rate": 1.8622063118472135e-05, "loss": 0.1714, "mean_token_accuracy": 0.9381113350391388, "num_tokens": 21626463.0, "step": 59 },
    { "epoch": 2.5161290322580645, "grad_norm": 0.9035063674891346, "learning_rate": 1.8544194045464888e-05, "loss": 0.1843, "mean_token_accuracy": 0.9337188005447388, "num_tokens": 21991893.0, "step": 60 },
    { "epoch": 2.5591397849462365, "grad_norm": 0.8418292533035925, "learning_rate": 1.8464356987288012e-05, "loss": 0.1526, "mean_token_accuracy": 0.9460138976573944, "num_tokens": 22361724.0, "step": 61 },
    { "epoch": 2.6021505376344085, "grad_norm": 0.7418502959925466, "learning_rate": 1.8382570332820045e-05, "loss": 0.142, "mean_token_accuracy": 0.9545767903327942, "num_tokens": 22736647.0, "step": 62 },
    { "epoch": 2.6451612903225805, "grad_norm": 0.9322325837904922, "learning_rate": 1.8298852919990254e-05, "loss": 0.1498, "mean_token_accuracy": 0.9440304934978485, "num_tokens": 23100836.0, "step": 63 },
    { "epoch": 2.688172043010753, "grad_norm": 0.7735956031556342, "learning_rate": 1.821322403143969e-05, "loss": 0.1392, "mean_token_accuracy": 0.9508139789104462, "num_tokens": 23482604.0, "step": 64 },
    { "epoch": 2.731182795698925, "grad_norm": 0.9120084675913216, "learning_rate": 1.812570339007983e-05, "loss": 0.1506, "mean_token_accuracy": 0.9453353136777878, "num_tokens": 23865993.0, "step": 65 },
    { "epoch": 2.774193548387097, "grad_norm": 0.7503811468513143, "learning_rate": 1.8036311154549783e-05, "loss": 0.1363, "mean_token_accuracy": 0.9510210454463959, "num_tokens": 24224549.0, "step": 66 },
    { "epoch": 2.817204301075269, "grad_norm": 1.1460712341286583, "learning_rate": 1.7945067914573147e-05, "loss": 0.1664, "mean_token_accuracy": 0.9413717687129974, "num_tokens": 24598626.0, "step": 67 },
    { "epoch": 2.860215053763441, "grad_norm": 0.8670992068464418, "learning_rate": 1.7851994686215592e-05, "loss": 0.1577, "mean_token_accuracy": 0.9477098882198334, "num_tokens": 24981628.0, "step": 68 },
    { "epoch": 2.903225806451613, "grad_norm": 0.7836229996954126, "learning_rate": 1.77571129070442e-05, "loss": 0.1427, "mean_token_accuracy": 0.9498772025108337, "num_tokens": 25357328.0, "step": 69 },
    { "epoch": 2.946236559139785, "grad_norm": 0.9864794829369287, "learning_rate": 1.766044443118978e-05, "loss": 0.1548, "mean_token_accuracy": 0.9467099010944366, "num_tokens": 25736995.0, "step": 70 },
    { "epoch": 2.989247311827957, "grad_norm": 0.8453374404480977, "learning_rate": 1.7562011524313187e-05, "loss": 0.1328, "mean_token_accuracy": 0.9533079415559769, "num_tokens": 26119812.0, "step": 71 },
    { "epoch": 3.0, "grad_norm": 0.8453374404480977, "learning_rate": 1.7461836858476858e-05, "loss": 0.023, "mean_token_accuracy": 0.9692671298980713, "num_tokens": 26217050.0, "step": 72 },
    { "epoch": 3.043010752688172, "grad_norm": 0.7191008184533193, "learning_rate": 1.7359943506922775e-05, "loss": 0.085, "mean_token_accuracy": 0.9692452400922775, "num_tokens": 26579836.0, "step": 73 },
    { "epoch": 3.086021505376344, "grad_norm": 0.7033653480672031, "learning_rate": 1.725635493875799e-05, "loss": 0.084, "mean_token_accuracy": 0.9705889225006104, "num_tokens": 26954186.0, "step": 74 },
    { "epoch": 3.129032258064516, "grad_norm": 0.5815187767804908, "learning_rate": 1.7151095013548996e-05, "loss": 0.0795, "mean_token_accuracy": 0.9710359871387482, "num_tokens": 27331222.0, "step": 75 },
    { "epoch": 3.172043010752688, "grad_norm": 0.6837466146484604, "learning_rate": 1.7044187975826126e-05, "loss": 0.0779, "mean_token_accuracy": 0.9715389311313629, "num_tokens": 27695626.0, "step": 76 },
    { "epoch": 3.21505376344086, "grad_norm": 0.7647564443576131, "learning_rate": 1.693565844949933e-05, "loss": 0.0592, "mean_token_accuracy": 0.9778129011392593, "num_tokens": 28074766.0, "step": 77 },
    { "epoch": 3.258064516129032, "grad_norm": 0.6960445168765227, "learning_rate": 1.6825531432186545e-05, "loss": 0.0825, "mean_token_accuracy": 0.9732242673635483, "num_tokens": 28441758.0, "step": 78 },
    { "epoch": 3.3010752688172045, "grad_norm": 0.8162134588348661, "learning_rate": 1.671383228945597e-05, "loss": 0.0795, "mean_token_accuracy": 0.9723010808229446, "num_tokens": 28816556.0, "step": 79 },
    { "epoch": 3.3440860215053765, "grad_norm": 0.7752408104041487, "learning_rate": 1.6600586748983642e-05, "loss": 0.0791, "mean_token_accuracy": 0.9741756767034531, "num_tokens": 29190153.0, "step": 80 },
    { "epoch": 3.3870967741935485, "grad_norm": 0.6264771832821134, "learning_rate": 1.648582089462756e-05, "loss": 0.0885, "mean_token_accuracy": 0.9700659811496735, "num_tokens": 29556225.0, "step": 81 },
    { "epoch": 3.4301075268817205, "grad_norm": 0.7087453451133532, "learning_rate": 1.6369561160419783e-05, "loss": 0.0756, "mean_token_accuracy": 0.9748696535825729, "num_tokens": 29944628.0, "step": 82 },
    { "epoch": 3.4731182795698925, "grad_norm": 0.6666804960250854, "learning_rate": 1.625183432447789e-05, "loss": 0.0768, "mean_token_accuracy": 0.9723282903432846, "num_tokens": 30326403.0, "step": 83 },
    { "epoch": 3.5161290322580645, "grad_norm": 0.7492247001485264, "learning_rate": 1.6132667502837164e-05, "loss": 0.0862, "mean_token_accuracy": 0.9687108993530273, "num_tokens": 30707604.0, "step": 84 },
    { "epoch": 3.5591397849462365, "grad_norm": 0.6413601535507213, "learning_rate": 1.6012088143204953e-05, "loss": 0.0771, "mean_token_accuracy": 0.9721613973379135, "num_tokens": 31094255.0, "step": 85 },
    { "epoch": 3.6021505376344085, "grad_norm": 0.7214880008358299, "learning_rate": 1.589012401863864e-05, "loss": 0.0902, "mean_token_accuracy": 0.9686519354581833, "num_tokens": 31465554.0, "step": 86 },
    { "epoch": 3.6451612903225805, "grad_norm": 0.6072340945600113, "learning_rate": 1.5766803221148676e-05, "loss": 0.0783, "mean_token_accuracy": 0.97231625020504, "num_tokens": 31841954.0, "step": 87 },
    { "epoch": 3.688172043010753, "grad_norm": 0.7141144509929378, "learning_rate": 1.5642154155228124e-05, "loss": 0.0784, "mean_token_accuracy": 0.9737986773252487, "num_tokens": 32231678.0, "step": 88 },
    { "epoch": 3.731182795698925, "grad_norm": 0.5930435846744901, "learning_rate": 1.5516205531310272e-05, "loss": 0.0773, "mean_token_accuracy": 0.9729326516389847, "num_tokens": 32611952.0, "step": 89 },
    { "epoch": 3.774193548387097, "grad_norm": 0.6873339835587674, "learning_rate": 1.538898635915576e-05, "loss": 0.0917, "mean_token_accuracy": 0.9682262241840363, "num_tokens": 32987305.0, "step": 90 },
    { "epoch": 3.817204301075269, "grad_norm": 0.628718128223866, "learning_rate": 1.526052594117071e-05, "loss": 0.0769, "mean_token_accuracy": 0.9746036380529404, "num_tokens": 33361173.0, "step": 91 },
    { "epoch": 3.860215053763441, "grad_norm": 0.7444168004427851, "learning_rate": 1.513085386565758e-05, "loss": 0.0766, "mean_token_accuracy": 0.9750553965568542, "num_tokens": 33744871.0, "step": 92 },
    { "epoch": 3.903225806451613, "grad_norm": 0.6080354599152884, "learning_rate": 1.5000000000000002e-05, "loss": 0.0881, "mean_token_accuracy": 0.9682539403438568, "num_tokens": 34122873.0, "step": 93 },
    { "epoch": 3.946236559139785, "grad_norm": 0.6254599667505842, "learning_rate": 1.4867994483783485e-05, "loss": 0.0674, "mean_token_accuracy": 0.9770089983940125, "num_tokens": 34492949.0, "step": 94 },
    { "epoch": 3.989247311827957, "grad_norm": 0.5928494398285244, "learning_rate": 1.4734867721853341e-05, "loss": 0.0723, "mean_token_accuracy": 0.9745485931634903, "num_tokens": 34864951.0, "step": 95 },
    { "epoch": 4.0, "grad_norm": 0.6180172580268953, "learning_rate": 1.4600650377311523e-05, "loss": 0.0162, "mean_token_accuracy": 0.9717925190925598, "num_tokens": 34956848.0, "step": 96 },
    { "epoch": 4.043010752688172, "grad_norm": 0.4798007088122171, "learning_rate": 1.4465373364454001e-05, "loss": 0.0415, "mean_token_accuracy": 0.9860930442810059, "num_tokens": 35327401.0, "step": 97 },
    { "epoch": 4.086021505376344, "grad_norm": 0.5030678863912684, "learning_rate": 1.4329067841650274e-05, "loss": 0.0512, "mean_token_accuracy": 0.9837829172611237, "num_tokens": 35697068.0, "step": 98 },
    { "epoch": 4.129032258064516, "grad_norm": 0.4887047754497154, "learning_rate": 1.4191765204166643e-05, "loss": 0.035, "mean_token_accuracy": 0.9878916591405869, "num_tokens": 36078911.0, "step": 99 },
    { "epoch": 4.172043010752688, "grad_norm": 0.556764696779543, "learning_rate": 1.4053497076934948e-05, "loss": 0.0388, "mean_token_accuracy": 0.9873095601797104, "num_tokens": 36457881.0, "step": 100 },
    { "epoch": 4.21505376344086, "grad_norm": 0.6105146685797401, "learning_rate": 1.3914295307268396e-05, "loss": 0.0426, "mean_token_accuracy": 0.9871442914009094, "num_tokens": 36840608.0, "step": 101 },
    { "epoch": 4.258064516129032, "grad_norm": 0.5520281427427763, "learning_rate": 1.3774191957526144e-05, "loss": 0.0353, "mean_token_accuracy": 0.9887934029102325, "num_tokens": 37215456.0, "step": 102 },
    { "epoch": 4.301075268817204, "grad_norm": 0.5752554242126501, "learning_rate": 1.3633219297728415e-05, "loss": 0.049, "mean_token_accuracy": 0.9847937226295471, "num_tokens": 37600868.0, "step": 103 },
    { "epoch": 4.344086021505376, "grad_norm": 0.6228700798176202, "learning_rate": 1.3491409798123687e-05, "loss": 0.041, "mean_token_accuracy": 0.9858721643686295, "num_tokens": 37964996.0, "step": 104 },
    { "epoch": 4.387096774193548, "grad_norm": 0.545565814617341, "learning_rate": 1.3348796121709862e-05, "loss": 0.0432, "mean_token_accuracy": 0.9834143966436386, "num_tokens": 38331633.0, "step": 105 },
    { "epoch": 4.43010752688172, "grad_norm": 0.6431910601756529, "learning_rate": 1.3205411116710973e-05, "loss": 0.044, "mean_token_accuracy": 0.9857050627470016, "num_tokens": 38703641.0, "step": 106 },
    { "epoch": 4.473118279569892, "grad_norm": 0.6183635335692609, "learning_rate": 1.3061287809011243e-05, "loss": 0.0486, "mean_token_accuracy": 0.9833229929208755, "num_tokens": 39075811.0, "step": 107 },
    { "epoch": 4.516129032258064, "grad_norm": 0.5734112514278982, "learning_rate": 1.291645939454825e-05, "loss": 0.0466, "mean_token_accuracy": 0.982891172170639, "num_tokens": 39453732.0, "step": 108 },
    { "epoch": 4.559139784946236, "grad_norm": 0.5062927460342779, "learning_rate": 1.277095923166689e-05, "loss": 0.0407, "mean_token_accuracy": 0.9859268963336945, "num_tokens": 39846622.0, "step": 109 },
    { "epoch": 4.602150537634409, "grad_norm": 0.6337986686205553, "learning_rate": 1.2624820833435939e-05, "loss": 0.0433, "mean_token_accuracy": 0.9856242388486862, "num_tokens": 40230600.0, "step": 110 },
    { "epoch": 4.645161290322581, "grad_norm": 0.4815068983509369, "learning_rate": 1.2478077859929e-05, "loss": 0.042, "mean_token_accuracy": 0.9860651940107346, "num_tokens": 40608421.0, "step": 111 },
    { "epoch": 4.688172043010753, "grad_norm": 0.3922313992389828, "learning_rate": 1.2330764110471567e-05, "loss": 0.0297, "mean_token_accuracy": 0.9892211109399796, "num_tokens": 40988293.0, "step": 112 },
    { "epoch": 4.731182795698925, "grad_norm": 0.46224024107519485, "learning_rate": 1.2182913515856016e-05, "loss": 0.0449, "mean_token_accuracy": 0.984123483300209, "num_tokens": 41357882.0, "step": 113 },
    { "epoch": 4.774193548387097, "grad_norm": 0.5508908953527153, "learning_rate": 1.2034560130526341e-05, "loss": 0.0467, "mean_token_accuracy": 0.9867139011621475, "num_tokens": 41725815.0, "step": 114 },
    { "epoch": 4.817204301075269, "grad_norm": 0.4834883935633812, "learning_rate": 1.1885738124734359e-05, "loss": 0.052, "mean_token_accuracy": 0.9825968146324158, "num_tokens": 42093847.0, "step": 115 },
    { "epoch": 4.860215053763441, "grad_norm": 0.5017978176670704, "learning_rate": 1.1736481776669307e-05, "loss": 0.0373, "mean_token_accuracy": 0.9864843785762787, "num_tokens": 42470652.0, "step": 116 },
    { "epoch": 4.903225806451613, "grad_norm": 0.436532105510215, "learning_rate": 1.1586825464562515e-05, "loss": 0.0302, "mean_token_accuracy": 0.989154502749443, "num_tokens": 42844022.0, "step": 117 },
    { "epoch": 4.946236559139785, "grad_norm": 0.5594322206619027, "learning_rate": 1.1436803658769082e-05, "loss": 0.0336, "mean_token_accuracy": 0.9877772778272629, "num_tokens": 43217049.0, "step": 118 },
    { "epoch": 4.989247311827957, "grad_norm": 0.46803693804934354, "learning_rate": 1.1286450913828313e-05, "loss": 0.0441, "mean_token_accuracy": 0.9841972589492798, "num_tokens": 43601245.0, "step": 119 },
    { "epoch": 5.0, "grad_norm": 0.46803693804934354, "learning_rate": 1.113580186050475e-05, "loss": 0.0072, "mean_token_accuracy": 0.9910504221916199, "num_tokens": 43691552.0, "step": 120 },
    { "epoch": 5.043010752688172, "grad_norm": 0.36589314652289223, "learning_rate": 1.0984891197811686e-05, "loss": 0.0189, "mean_token_accuracy": 0.9931964129209518, "num_tokens": 44069981.0, "step": 121 },
    { "epoch": 5.086021505376344, "grad_norm": 0.3262477287689747, "learning_rate": 1.0833753685018935e-05, "loss": 0.0172, "mean_token_accuracy": 0.9938466399908066, "num_tokens": 44452961.0, "step": 122 },
    { "epoch": 5.129032258064516, "grad_norm": 0.37126458666561746, "learning_rate": 1.0682424133646712e-05, "loss": 0.0246, "mean_token_accuracy": 0.9916689246892929, "num_tokens": 44829261.0, "step": 123 },
    { "epoch": 5.172043010752688, "grad_norm": 0.39854735969327243, "learning_rate": 1.0530937399447496e-05, "loss": 0.0208, "mean_token_accuracy": 0.9931624680757523, "num_tokens": 45195677.0, "step": 124 },
    { "epoch": 5.21505376344086, "grad_norm": 0.4011633602771004, "learning_rate": 1.0379328374377715e-05, "loss": 0.0187, "mean_token_accuracy": 0.9931042641401291, "num_tokens": 45582399.0, "step": 125 },
    { "epoch": 5.258064516129032, "grad_norm": 0.4149531963831674, "learning_rate": 1.0227631978561057e-05, "loss": 0.0201, "mean_token_accuracy": 0.9933370649814606, "num_tokens": 45957909.0, "step": 126 },
    { "epoch": 5.301075268817204, "grad_norm": 0.35580739164687514, "learning_rate": 1.0075883152245334e-05, "loss": 0.0182, "mean_token_accuracy": 0.9931957274675369, "num_tokens": 46331702.0, "step": 127 },
    { "epoch": 5.344086021505376, "grad_norm": 0.3659638692288794, "learning_rate": 9.92411684775467e-06, "loss": 0.0211, "mean_token_accuracy": 0.992146834731102, "num_tokens": 46699926.0, "step": 128 },
    { "epoch": 5.387096774193548, "grad_norm": 0.5124446313766259, "learning_rate": 9.772368021438943e-06, "loss": 0.0224, "mean_token_accuracy": 0.9933983087539673, "num_tokens": 47084802.0, "step": 129 },
    { "epoch": 5.43010752688172, "grad_norm": 0.3646431524853468, "learning_rate": 9.620671625622287e-06, "loss": 0.0213, "mean_token_accuracy": 0.9931617379188538, "num_tokens": 47454900.0, "step": 130 },
    { "epoch": 5.473118279569892, "grad_norm": 0.43644622149160334, "learning_rate": 9.469062600552509e-06, "loss": 0.0183, "mean_token_accuracy": 0.9936078786849976, "num_tokens": 47824110.0, "step": 131 },
    { "epoch": 5.516129032258064, "grad_norm": 0.40737855715835, "learning_rate": 9.317575866353293e-06, "loss": 0.0167, "mean_token_accuracy": 0.9936787039041519, "num_tokens": 48199410.0, "step": 132 },
    { "epoch": 5.559139784946236, "grad_norm": 0.3453003430092695, "learning_rate": 9.166246314981066e-06, "loss": 0.016, "mean_token_accuracy": 0.9941088110208511, "num_tokens": 48571600.0, "step": 133 },
    { "epoch": 5.602150537634409, "grad_norm": 0.4044531156287549, "learning_rate": 9.015108802188314e-06, "loss": 0.0203, "mean_token_accuracy": 0.9922708123922348, "num_tokens": 48944562.0, "step": 134 },
    { "epoch": 5.645161290322581, "grad_norm": 0.39933569411586073, "learning_rate": 8.86419813949525e-06, "loss": 0.026, "mean_token_accuracy": 0.9936483949422836, "num_tokens": 49329440.0, "step": 135 },
    { "epoch": 5.688172043010753, "grad_norm": 0.5189339190149178, "learning_rate": 8.71354908617169e-06, "loss": 0.0142, "mean_token_accuracy": 0.9950688779354095, "num_tokens": 49715705.0, "step": 136 },
    { "epoch": 5.731182795698925, "grad_norm": 0.4042026330549438, "learning_rate": 8.56319634123092e-06, "loss": 0.0192, "mean_token_accuracy": 0.9928712397813797, "num_tokens": 50105592.0, "step": 137 },
    { "epoch": 5.774193548387097, "grad_norm": 0.4159641462800478, "learning_rate": 8.413174535437486e-06, "loss": 0.0197, "mean_token_accuracy": 0.9930095821619034, "num_tokens": 50473889.0, "step": 138 },
    { "epoch": 5.817204301075269, "grad_norm": 0.46704221505297566, "learning_rate": 8.263518223330698e-06, "loss": 0.0264, "mean_token_accuracy": 0.9904347956180573, "num_tokens": 50848608.0, "step": 139 },
    { "epoch": 5.860215053763441, "grad_norm": 0.4423535789298909, "learning_rate": 8.114261875265643e-06, "loss": 0.0202, "mean_token_accuracy": 0.9932683557271957, "num_tokens": 51213311.0, "step": 140 },
    { "epoch": 5.903225806451613, "grad_norm": 0.39628026138404, "learning_rate": 7.965439869473664e-06, "loss": 0.02, "mean_token_accuracy": 0.9924024939537048, "num_tokens": 51582321.0, "step": 141 },
    { "epoch": 5.946236559139785, "grad_norm": 0.40141553841310146, "learning_rate": 7.817086484143987e-06, "loss": 0.0251, "mean_token_accuracy": 0.9912929236888885, "num_tokens": 51957175.0, "step": 142 },
    { "epoch": 5.989247311827957, "grad_norm": 0.37692584612441205, "learning_rate": 7.669235889528436e-06, "loss": 0.0192, "mean_token_accuracy": 0.9928417950868607, "num_tokens": 52337657.0, "step": 143 },
    { "epoch": 6.0, "grad_norm": 0.37692584612441205, "learning_rate": 7.521922140071003e-06, "loss": 0.0029, "mean_token_accuracy": 0.9955406785011292, "num_tokens": 52435404.0, "step": 144 },
    { "epoch": 6.043010752688172, "grad_norm": 0.2777527936559124, "learning_rate": 7.375179166564062e-06, "loss": 0.0096, "mean_token_accuracy": 0.9978060573339462, "num_tokens": 52801343.0, "step": 145 },
    { "epoch": 6.086021505376344, "grad_norm": 0.22061818508623526, "learning_rate": 7.2290407683331154e-06, "loss": 0.0128, "mean_token_accuracy": 0.9961294829845428, "num_tokens": 53170023.0, "step": 146 },
    { "epoch": 6.129032258064516, "grad_norm": 0.2450891259162475, "learning_rate": 7.0835406054517505e-06, "loss": 0.0098, "mean_token_accuracy": 0.9963818788528442, "num_tokens": 53540818.0, "step": 147 },
    { "epoch": 6.172043010752688, "grad_norm": 0.27354650244267165, "learning_rate": 6.93871219098876e-06, "loss": 0.0083, "mean_token_accuracy": 0.9976299554109573, "num_tokens": 53912206.0, "step": 148 },
    { "epoch": 6.21505376344086, "grad_norm": 0.24292132564074775, "learning_rate": 6.79458888328903e-06, "loss": 0.0091, "mean_token_accuracy": 0.9975238144397736, "num_tokens": 54290644.0, "step": 149 },
    { "epoch": 6.258064516129032, "grad_norm": 0.25903061511703607, "learning_rate": 6.651203878290139e-06, "loss": 0.0084, "mean_token_accuracy": 0.9973570704460144, "num_tokens": 54671572.0, "step": 150 },
    { "epoch": 6.301075268817204, "grad_norm": 0.27785773779575323, "learning_rate": 6.508590201876317e-06, "loss": 0.0068, "mean_token_accuracy": 0.9980671256780624, "num_tokens": 55043557.0, "step": 151 },
    { "epoch": 6.344086021505376, "grad_norm": 0.22495303178328654, "learning_rate": 6.366780702271589e-06, "loss": 0.0081, "mean_token_accuracy": 0.9977790415287018, "num_tokens": 55434084.0, "step": 152 },
    { "epoch": 6.387096774193548, "grad_norm": 0.21767145483330294, "learning_rate": 6.225808042473857e-06, "loss": 0.0073, "mean_token_accuracy": 0.9982246607542038, "num_tokens": 55816748.0, "step": 153 },
    { "epoch": 6.43010752688172, "grad_norm": 0.1720994423666938, "learning_rate": 6.085704692731609e-06, "loss": 0.0089, "mean_token_accuracy": 0.9972024708986282, "num_tokens": 56204052.0, "step": 154 },
    { "epoch": 6.473118279569892, "grad_norm": 0.4151871031911838, "learning_rate": 5.946502923065054e-06, "loss": 0.0064, "mean_token_accuracy": 0.9978921562433243, "num_tokens": 56584279.0, "step": 155 },
    { "epoch": 6.516129032258064, "grad_norm": 0.24009379583099646, "learning_rate": 5.8082347958333625e-06, "loss": 0.0061, "mean_token_accuracy": 0.9981395900249481, "num_tokens": 56966123.0, "step": 156 },
    { "epoch": 6.559139784946236, "grad_norm": 0.24984505593518833, "learning_rate": 5.670932158349732e-06, "loss": 0.0079, "mean_token_accuracy": 0.9977651834487915, "num_tokens": 57340499.0, "step": 157 },
    { "epoch": 6.602150537634409, "grad_norm": 0.22800352911247235, "learning_rate": 5.534626635546e-06, "loss": 0.0066, "mean_token_accuracy": 0.9973956197500229, "num_tokens": 57722362.0, "step": 158 },
    { "epoch": 6.645161290322581, "grad_norm": 0.3972684492677971, "learning_rate": 5.399349622688479e-06, "loss": 0.0129, "mean_token_accuracy": 0.9956333786249161, "num_tokens": 58089687.0, "step": 159 },
    { "epoch": 6.688172043010753, "grad_norm": 0.2855037997818557, "learning_rate": 5.2651322781466606e-06, "loss": 0.0097, "mean_token_accuracy": 0.9961634278297424, "num_tokens": 58440751.0, "step": 160 },
    { "epoch": 6.731182795698925, "grad_norm": 0.2561924349864501, "learning_rate": 5.132005516216512e-06, "loss": 0.0095, "mean_token_accuracy": 0.9967732727527618, "num_tokens": 58816520.0, "step": 161 },
    { "epoch": 6.774193548387097, "grad_norm": 0.2550205734143902, "learning_rate": 5.000000000000003e-06, "loss": 0.0096, "mean_token_accuracy": 0.9970249980688095, "num_tokens": 59199244.0, "step": 162 },
    { "epoch": 6.817204301075269, "grad_norm": 0.2895162694155573, "learning_rate": 4.869146134342426e-06, "loss": 0.0112, "mean_token_accuracy": 0.995991975069046, "num_tokens": 59564510.0, "step": 163 },
    { "epoch": 6.860215053763441, "grad_norm": 0.27109152841491596, "learning_rate": 4.739474058829288e-06, "loss": 0.0098, "mean_token_accuracy": 0.9966453909873962, "num_tokens": 59933755.0, "step": 164 },
    { "epoch": 6.903225806451613, "grad_norm": 0.20196018529315143, "learning_rate": 4.611013640844245e-06, "loss": 0.0072, "mean_token_accuracy": 0.9972820430994034, "num_tokens": 60318049.0, "step": 165 },
    { "epoch": 6.946236559139785, "grad_norm": 0.28770462101147276, "learning_rate": 4.483794468689728e-06, "loss": 0.0075, "mean_token_accuracy": 0.9979050308465958, "num_tokens": 60702707.0, "step": 166 },
    { "epoch": 6.989247311827957, "grad_norm": 0.2570986978659673, "learning_rate": 4.357845844771881e-06, "loss": 0.0072, "mean_token_accuracy": 0.9979168772697449, "num_tokens": 61080551.0, "step": 167 },
    { "epoch": 7.0, "grad_norm": 0.2570986978659673, "learning_rate": 4.2331967788513295e-06, "loss": 0.001, "mean_token_accuracy": 0.9990871548652649, "num_tokens": 61169909.0, "step": 168 },
    { "epoch": 7.043010752688172, "grad_norm": 0.18638689720743132, "learning_rate": 4.109875981361363e-06, "loss": 0.0029, "mean_token_accuracy": 0.9993208199739456, "num_tokens": 61548169.0, "step": 169 },
    { "epoch": 7.086021505376344, "grad_norm": 0.09274868532077754, "learning_rate": 3.987911856795047e-06, "loss": 0.0024, "mean_token_accuracy": 0.9993245899677277, "num_tokens": 61927780.0, "step": 170 },
    { "epoch": 7.129032258064516, "grad_norm": 0.11314592234156305, "learning_rate": 3.867332497162836e-06, "loss": 0.0043, "mean_token_accuracy": 0.9991153180599213, "num_tokens": 62295765.0, "step": 171 },
    { "epoch": 7.172043010752688, "grad_norm": 0.125324034467096, "learning_rate": 3.748165675522113e-06, "loss": 0.0043, "mean_token_accuracy": 0.998317152261734, "num_tokens": 62680703.0, "step": 172 },
    { "epoch": 7.21505376344086, "grad_norm": 0.162207800509228, "learning_rate": 3.630438839580217e-06, "loss": 0.0036, "mean_token_accuracy": 0.9989175051450729, "num_tokens": 63048051.0, "step": 173 },
    { "epoch": 7.258064516129032, "grad_norm": 0.14736979534555758, "learning_rate": 3.5141791053724405e-06, "loss": 0.0031, "mean_token_accuracy": 0.9988811016082764, "num_tokens": 63420296.0, "step": 174 },
    { "epoch": 7.301075268817204, "grad_norm": 0.24994209797043623, "learning_rate": 3.399413251016359e-06, "loss": 0.0045, "mean_token_accuracy": 0.9984373897314072, "num_tokens": 63802044.0, "step": 175 },
    { "epoch": 7.344086021505376, "grad_norm": 0.19480816289308553, "learning_rate": 3.2861677105440335e-06, "loss": 0.004, "mean_token_accuracy": 0.9981936365365982, "num_tokens": 64179447.0, "step": 176 },
    { "epoch": 7.387096774193548, "grad_norm": 0.13577926396280693, "learning_rate": 3.174468567813461e-06, "loss": 0.0038, "mean_token_accuracy": 0.9986472874879837, "num_tokens": 64553814.0, "step": 177 },
    { "epoch": 7.43010752688172, "grad_norm": 0.1558952871547043, "learning_rate": 3.0643415505006733e-06, "loss": 0.0025, "mean_token_accuracy": 0.9991411715745926, "num_tokens": 64937169.0, "step": 178 },
    { "epoch": 7.473118279569892, "grad_norm": 0.19500035034736574, "learning_rate": 2.9558120241738786e-06, "loss": 0.0047, "mean_token_accuracy": 0.9986667931079865, "num_tokens": 65304620.0, "step": 179 },
    { "epoch": 7.516129032258064, "grad_norm": 0.11403208704075335, "learning_rate": 2.8489049864510053e-06, "loss": 0.002, "mean_token_accuracy": 0.9994394332170486, "num_tokens": 65683792.0, "step": 180 },
    { "epoch": 7.559139784946236, "grad_norm": 0.09515486390531766, "learning_rate": 2.7436450612420098e-06, "loss": 0.0033, "mean_token_accuracy": 0.9987542629241943, "num_tokens": 66052076.0, "step": 181 },
    { "epoch": 7.602150537634409, "grad_norm": 0.11427433435445018, "learning_rate": 2.640056493077231e-06, "loss": 0.0039, "mean_token_accuracy": 0.9984430074691772, "num_tokens": 66429336.0, "step": 182 },
    { "epoch": 7.645161290322581, "grad_norm": 0.14803270246342048, "learning_rate": 2.5381631415231455e-06, "loss": 0.0024, "mean_token_accuracy": 0.9995441436767578, "num_tokens": 66808483.0, "step": 183 },
    { "epoch": 7.688172043010753, "grad_norm": 0.060271743291256424, "learning_rate": 2.4379884756868167e-06, "loss": 0.0013, "mean_token_accuracy": 0.9994434714317322, "num_tokens": 67196644.0, "step": 184 },
    { "epoch": 7.731182795698925, "grad_norm": 0.15449382158867014, "learning_rate": 2.339555568810221e-06, "loss": 0.0033, "mean_token_accuracy": 0.998866930603981, "num_tokens": 67576705.0, "step": 185 },
    { "epoch": 7.774193548387097, "grad_norm": 0.15970881151513372, "learning_rate": 2.2428870929558012e-06, "loss": 0.0022, "mean_token_accuracy": 0.9994314312934875, "num_tokens": 67955564.0, "step": 186 },
    { "epoch": 7.817204301075269, "grad_norm": 0.17023262969614245, "learning_rate": 2.1480053137844115e-06, "loss": 0.0023, "mean_token_accuracy": 0.999400720000267, "num_tokens": 68336162.0, "step": 187 },
    { "epoch": 7.860215053763441, "grad_norm": 0.12445830166048269, "learning_rate": 2.054932085426856e-06, "loss": 0.0025, "mean_token_accuracy": 0.9985401034355164, "num_tokens": 68713903.0, "step": 188 },
    { "epoch": 7.903225806451613, "grad_norm": 0.10664444106034968, "learning_rate": 1.963688845450218e-06, "loss": 0.0042, "mean_token_accuracy": 0.9983135461807251, "num_tokens": 69083101.0, "step": 189 },
    { "epoch": 7.946236559139785, "grad_norm": 0.20267328769221749, "learning_rate": 1.8742966099201699e-06, "loss": 0.0025, "mean_token_accuracy": 0.9991088360548019, "num_tokens": 69450911.0, "step": 190 },
    { "epoch": 7.989247311827957, "grad_norm": 0.07966294331340328, "learning_rate": 1.7867759685603115e-06, "loss": 0.0043, "mean_token_accuracy": 0.9985426664352417, "num_tokens": 69816019.0, "step": 191 },
    { "epoch": 8.0, "grad_norm": 0.160752083075703, "learning_rate": 1.7011470800097496e-06, "loss": 0.0006, "mean_token_accuracy": 0.9995448589324951, "num_tokens": 69909418.0, "step": 192 },
    { "epoch": 8.043010752688172, "grad_norm": 0.12953983503212843, "learning_rate": 1.6174296671799571e-06, "loss": 0.0023, "mean_token_accuracy": 0.9991968870162964, "num_tokens": 70287063.0, "step": 193 },
    { "epoch": 8.086021505376344, "grad_norm": 0.11355014942214375, "learning_rate": 1.5356430127119915e-06, "loss": 0.0023, "mean_token_accuracy": 0.9992813766002655, "num_tokens": 70677976.0, "step": 194 },
    { "epoch": 8.129032258064516, "grad_norm": 0.09452954664073393, "learning_rate": 1.4558059545351144e-06, "loss": 0.0022, "mean_token_accuracy": 0.9995724707841873, "num_tokens": 71039103.0, "step": 195 },
    { "epoch": 8.172043010752688, "grad_norm": 0.0611357247699829, "learning_rate": 1.3779368815278648e-06, "loss": 0.0013, "mean_token_accuracy": 0.9998900592327118, "num_tokens": 71408946.0, "step": 196 },
    { "epoch": 8.21505376344086, "grad_norm": 0.05878301201751446, "learning_rate": 1.302053729282533e-06, "loss": 0.0015, "mean_token_accuracy": 0.9993925988674164, "num_tokens": 71787495.0, "step": 197 },
    { "epoch": 8.258064516129032, "grad_norm": 0.05644673250508509, "learning_rate": 1.2281739759740575e-06, "loss": 0.0012, "mean_token_accuracy": 1.0, "num_tokens": 72167744.0, "step": 198 },
    { "epoch": 8.301075268817204, "grad_norm": 0.0678174975886251, "learning_rate": 1.156314638334277e-06, "loss": 0.0014, "mean_token_accuracy": 0.9995383620262146, "num_tokens": 72535197.0, "step": 199 },
    { "epoch": 8.344086021505376, "grad_norm": 0.07187441226828475, "learning_rate": 1.086492267732462e-06, "loss": 0.0017, "mean_token_accuracy": 0.9995486289262772, "num_tokens": 72912819.0, "step": 200 },
    { "epoch": 8.387096774193548, "grad_norm": 0.07795205289710616, "learning_rate": 1.01872294636304e-06, "loss": 0.0013, "mean_token_accuracy": 0.9996720552444458, "num_tokens": 73288384.0, "step": 201 },
    { "epoch": 8.43010752688172, "grad_norm": 0.07266542626737585, "learning_rate": 9.530222835413739e-07, "loss": 0.002, "mean_token_accuracy": 0.9992012083530426, "num_tokens": 73649231.0, "step": 202 },
    { "epoch": 8.473118279569892, "grad_norm": 0.05690867261143057, "learning_rate": 8.894054121084839e-07, "loss": 0.0013, "mean_token_accuracy": 0.9995424598455429, "num_tokens": 74024940.0, "step": 203 },
    { "epoch": 8.516129032258064, "grad_norm": 0.07784424768246319, "learning_rate": 8.278869849454718e-07, "loss": 0.0015, "mean_token_accuracy": 0.9997651875019073, "num_tokens": 74393707.0, "step": 204 },
    { "epoch": 8.559139784946236, "grad_norm": 0.08544567215406322, "learning_rate": 7.684811715985429e-07, "loss": 0.0016, "mean_token_accuracy": 0.9993268847465515, "num_tokens": 74769770.0, "step": 205 },
    { "epoch": 8.602150537634408, "grad_norm": 0.04848205681346562, "learning_rate": 7.1120165501533e-07, "loss": 0.0011, "mean_token_accuracy": 0.9996765702962875, "num_tokens": 75144638.0, "step": 206 },
    { "epoch": 8.64516129032258, "grad_norm": 0.07153581712473589, "learning_rate": 6.560616283932897e-07, "loss": 0.0019, "mean_token_accuracy": 0.9993336498737335, "num_tokens": 75517948.0, "step": 207 },
    { "epoch": 8.688172043010752, "grad_norm": 0.03218818118067709, "learning_rate": 6.030737921409169e-07, "loss": 0.0005, "mean_token_accuracy": 1.0, "num_tokens": 75913411.0, "step": 208 },
    { "epoch": 8.731182795698924, "grad_norm": 0.043871722359634206, "learning_rate": 5.522503509524591e-07, "loss": 0.0011, "mean_token_accuracy": 0.9997778683900833, "num_tokens": 76292535.0, "step": 209 },
    { "epoch": 8.774193548387096, "grad_norm": 0.11523726962843446, "learning_rate": 5.036030109968082e-07, "loss": 0.0013, "mean_token_accuracy": 0.9996742606163025, "num_tokens": 76668845.0, "step": 210 },
    { "epoch": 8.817204301075268, "grad_norm": 0.0558278621305881, "learning_rate": 4.5714297722121105e-07, "loss": 0.0012, "mean_token_accuracy": 0.9997794181108475, "num_tokens": 77057588.0, "step": 211 },
    { "epoch": 8.86021505376344, "grad_norm": 0.05082679865672205, "learning_rate": 4.128809507704445e-07, "loss": 0.0008, "mean_token_accuracy": 1.0, "num_tokens": 77440951.0, "step": 212 },
    { "epoch": 8.903225806451612, "grad_norm": 0.06077000745250929, "learning_rate": 3.708271265220087e-07, "loss": 0.0014, "mean_token_accuracy": 0.9994602203369141, "num_tokens": 77816116.0, "step": 213 },
    { "epoch": 8.946236559139784, "grad_norm": 0.05162284627072263, "learning_rate": 3.309911907379393e-07, "loss": 0.0011, "mean_token_accuracy": 0.9997801780700684, "num_tokens": 78172575.0, "step": 214 },
    { "epoch": 8.989247311827956, "grad_norm": 0.05274284118745483, "learning_rate": 2.9338231883378365e-07, "loss": 0.0013, "mean_token_accuracy": 0.999669536948204, "num_tokens": 78554073.0, "step": 215 },
    { "epoch": 9.0, "grad_norm": 0.05274284118745483, "learning_rate": 2.5800917326521013e-07, "loss": 0.0006, "mean_token_accuracy": 0.9990884065628052, "num_tokens": 78652031.0, "step": 216 },
    { "epoch": 9.043010752688172, "grad_norm": 0.057243512265435566, "learning_rate": 2.248799015327907e-07, "loss": 0.0008, "mean_token_accuracy": 0.9998939335346222, "num_tokens": 79037063.0, "step": 217 },
    { "epoch": 9.086021505376344, "grad_norm": 0.05131590701841364, "learning_rate": 1.9400213430538773e-07, "loss": 0.0016, "mean_token_accuracy": 0.9996659904718399, "num_tokens": 79408170.0, "step": 218 },
    { "epoch": 9.129032258064516, "grad_norm": 0.06907734176650242, "learning_rate": 1.6538298366257975e-07, "loss": 0.0013, "mean_token_accuracy": 0.9998951852321625, "num_tokens": 79777017.0, "step": 219 },
    { "epoch": 9.172043010752688, "grad_norm": 0.040367078778572214, "learning_rate": 1.3902904145653094e-07, "loss": 0.0008, "mean_token_accuracy": 1.0, "num_tokens": 80135769.0, "step": 220 },
    { "epoch": 9.21505376344086, "grad_norm": 0.054348476268462176, "learning_rate": 1.1494637779369766e-07, "loss": 0.0013, "mean_token_accuracy": 0.9996242523193359, "num_tokens": 80517437.0, "step": 221 },
    { "epoch": 9.258064516129032, "grad_norm": 0.04194468920603892, "learning_rate": 9.314053963669245e-08, "loss": 0.0009, "mean_token_accuracy": 0.9998854249715805, "num_tokens": 80904669.0, "step": 222 },
    { "epoch": 9.301075268817204, "grad_norm": 0.050517226281810455, "learning_rate": 7.361654952665608e-08, "loss": 0.0009, "mean_token_accuracy": 1.0, "num_tokens": 81283528.0, "step": 223 },
    { "epoch": 9.344086021505376, "grad_norm": 0.04714202439779049, "learning_rate": 5.637890442641403e-08, "loss": 0.0011, "mean_token_accuracy": 0.9998873323202133, "num_tokens": 81655270.0, "step": 224 },
    { "epoch": 9.387096774193548, "grad_norm": 0.055175970745747455, "learning_rate": 4.143157468468717e-08, "loss": 0.0013, "mean_token_accuracy": 0.9995371699333191, "num_tokens": 82027314.0, "step": 225 },
    { "epoch": 9.43010752688172, "grad_norm": 0.044056512200001, "learning_rate": 2.8778003121607834e-08, "loss": 0.0015, "mean_token_accuracy": 0.9993147253990173, "num_tokens": 82402877.0, "step": 226 },
    { "epoch": 9.473118279569892, "grad_norm": 0.06449776282190266, "learning_rate": 1.8421104235727406e-08, "loss": 0.0013, "mean_token_accuracy": 0.9998880922794342, "num_tokens": 82793640.0, "step": 227 },
    { "epoch": 9.516129032258064, "grad_norm": 0.04738484622052601, "learning_rate": 1.0363263532724433e-08, "loss": 0.001, "mean_token_accuracy": 0.999885693192482, "num_tokens": 83167856.0, "step": 228 },
    { "epoch": 9.559139784946236, "grad_norm": 0.05120616744078798, "learning_rate": 4.606336975948589e-09, "loss": 0.0015, "mean_token_accuracy": 0.9992931187152863, "num_tokens": 83549841.0, "step": 229 },
    { "epoch": 9.602150537634408, "grad_norm": 0.06416283623906281, "learning_rate": 1.1516505589381777e-09, "loss": 0.0008, "mean_token_accuracy": 0.9997781366109848, "num_tokens": 83929557.0, "step": 230 },
    { "epoch": 9.602150537634408, "step": 230, "total_flos": 824307871121408.0, "train_loss": 0.1251811162315552, "train_runtime": 58145.7461, "train_samples_per_second": 0.507, "train_steps_per_second": 0.004 }
  ],
| "logging_steps": 1, | |
| "max_steps": 230, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 10, | |
| "save_steps": 30, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 824307871121408.0, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |