MoM / scenario_cot_ratio_3B / trainer_state.json
{
"best_metric": 0.7144020795822144,
"best_model_checkpoint": "/users/u2023000898/model/atrain_qc/scenario_train_40K_cot_ratio_3/checkpoint-1220",
"epoch": 2.9986708925467744,
"eval_steps": 20,
"global_step": 1833,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01635824557816174,
"grad_norm": 5.295344734073509,
"learning_rate": 5.434782608695653e-07,
"loss": 1.4359,
"step": 10
},
{
"epoch": 0.03271649115632348,
"grad_norm": 3.422164657559387,
"learning_rate": 1.0869565217391306e-06,
"loss": 1.4181,
"step": 20
},
{
"epoch": 0.03271649115632348,
"eval_loss": 1.3935878276824951,
"eval_runtime": 8.9905,
"eval_samples_per_second": 8.787,
"eval_steps_per_second": 4.449,
"step": 20
},
{
"epoch": 0.04907473673448522,
"grad_norm": 1.986613395208058,
"learning_rate": 1.6304347826086957e-06,
"loss": 1.303,
"step": 30
},
{
"epoch": 0.06543298231264696,
"grad_norm": 1.4523886723377106,
"learning_rate": 2.173913043478261e-06,
"loss": 1.1795,
"step": 40
},
{
"epoch": 0.06543298231264696,
"eval_loss": 1.1402088403701782,
"eval_runtime": 8.8832,
"eval_samples_per_second": 8.893,
"eval_steps_per_second": 4.503,
"step": 40
},
{
"epoch": 0.08179122789080871,
"grad_norm": 1.1700390466976565,
"learning_rate": 2.7173913043478263e-06,
"loss": 1.0727,
"step": 50
},
{
"epoch": 0.09814947346897045,
"grad_norm": 0.8879790034956169,
"learning_rate": 3.2608695652173914e-06,
"loss": 0.9978,
"step": 60
},
{
"epoch": 0.09814947346897045,
"eval_loss": 0.9959300756454468,
"eval_runtime": 8.9186,
"eval_samples_per_second": 8.858,
"eval_steps_per_second": 4.485,
"step": 60
},
{
"epoch": 0.1145077190471322,
"grad_norm": 0.8251876813224245,
"learning_rate": 3.804347826086957e-06,
"loss": 0.962,
"step": 70
},
{
"epoch": 0.13086596462529393,
"grad_norm": 0.7597303029800314,
"learning_rate": 4.347826086956522e-06,
"loss": 0.9344,
"step": 80
},
{
"epoch": 0.13086596462529393,
"eval_loss": 0.9332086443901062,
"eval_runtime": 9.1918,
"eval_samples_per_second": 8.595,
"eval_steps_per_second": 4.352,
"step": 80
},
{
"epoch": 0.1472242102034557,
"grad_norm": 0.7643744159525921,
"learning_rate": 4.891304347826087e-06,
"loss": 0.9113,
"step": 90
},
{
"epoch": 0.16358245578161743,
"grad_norm": 0.7298856847403965,
"learning_rate": 5.4347826086956525e-06,
"loss": 0.8836,
"step": 100
},
{
"epoch": 0.16358245578161743,
"eval_loss": 0.8985484838485718,
"eval_runtime": 9.0729,
"eval_samples_per_second": 8.707,
"eval_steps_per_second": 4.409,
"step": 100
},
{
"epoch": 0.17994070135977916,
"grad_norm": 0.7853004926976781,
"learning_rate": 5.978260869565218e-06,
"loss": 0.873,
"step": 110
},
{
"epoch": 0.1962989469379409,
"grad_norm": 0.8086725449370039,
"learning_rate": 6.521739130434783e-06,
"loss": 0.8649,
"step": 120
},
{
"epoch": 0.1962989469379409,
"eval_loss": 0.8743940591812134,
"eval_runtime": 8.8733,
"eval_samples_per_second": 8.903,
"eval_steps_per_second": 4.508,
"step": 120
},
{
"epoch": 0.21265719251610266,
"grad_norm": 0.78611945621464,
"learning_rate": 7.065217391304349e-06,
"loss": 0.8502,
"step": 130
},
{
"epoch": 0.2290154380942644,
"grad_norm": 0.8139497555344073,
"learning_rate": 7.608695652173914e-06,
"loss": 0.8424,
"step": 140
},
{
"epoch": 0.2290154380942644,
"eval_loss": 0.8560407757759094,
"eval_runtime": 12.0219,
"eval_samples_per_second": 6.571,
"eval_steps_per_second": 3.327,
"step": 140
},
{
"epoch": 0.24537368367242612,
"grad_norm": 0.8656686458129763,
"learning_rate": 8.15217391304348e-06,
"loss": 0.8449,
"step": 150
},
{
"epoch": 0.26173192925058786,
"grad_norm": 0.9283042437403443,
"learning_rate": 8.695652173913044e-06,
"loss": 0.8265,
"step": 160
},
{
"epoch": 0.26173192925058786,
"eval_loss": 0.8416913747787476,
"eval_runtime": 9.036,
"eval_samples_per_second": 8.743,
"eval_steps_per_second": 4.427,
"step": 160
},
{
"epoch": 0.2780901748287496,
"grad_norm": 0.810298408576699,
"learning_rate": 9.23913043478261e-06,
"loss": 0.8293,
"step": 170
},
{
"epoch": 0.2944484204069114,
"grad_norm": 0.8313112566441393,
"learning_rate": 9.782608695652175e-06,
"loss": 0.8159,
"step": 180
},
{
"epoch": 0.2944484204069114,
"eval_loss": 0.8295494318008423,
"eval_runtime": 8.8777,
"eval_samples_per_second": 8.899,
"eval_steps_per_second": 4.506,
"step": 180
},
{
"epoch": 0.3108066659850731,
"grad_norm": 0.8697727174133723,
"learning_rate": 9.999673339806799e-06,
"loss": 0.8191,
"step": 190
},
{
"epoch": 0.32716491156323485,
"grad_norm": 0.900525558157214,
"learning_rate": 9.997677237639758e-06,
"loss": 0.8024,
"step": 200
},
{
"epoch": 0.32716491156323485,
"eval_loss": 0.822140097618103,
"eval_runtime": 8.8793,
"eval_samples_per_second": 8.897,
"eval_steps_per_second": 4.505,
"step": 200
},
{
"epoch": 0.3435231571413966,
"grad_norm": 0.8222174164436653,
"learning_rate": 9.993867234795303e-06,
"loss": 0.7988,
"step": 210
},
{
"epoch": 0.3598814027195583,
"grad_norm": 0.8209330165865478,
"learning_rate": 9.98824471410918e-06,
"loss": 0.7934,
"step": 220
},
{
"epoch": 0.3598814027195583,
"eval_loss": 0.8108458518981934,
"eval_runtime": 8.8086,
"eval_samples_per_second": 8.969,
"eval_steps_per_second": 4.541,
"step": 220
},
{
"epoch": 0.37623964829772005,
"grad_norm": 0.7678941313429579,
"learning_rate": 9.980811716268215e-06,
"loss": 0.7959,
"step": 230
},
{
"epoch": 0.3925978938758818,
"grad_norm": 0.82512300528877,
"learning_rate": 9.971570939069637e-06,
"loss": 0.7903,
"step": 240
},
{
"epoch": 0.3925978938758818,
"eval_loss": 0.8030208349227905,
"eval_runtime": 8.8553,
"eval_samples_per_second": 8.921,
"eval_steps_per_second": 4.517,
"step": 240
},
{
"epoch": 0.4089561394540436,
"grad_norm": 0.7846619918234344,
"learning_rate": 9.96052573644193e-06,
"loss": 0.7893,
"step": 250
},
{
"epoch": 0.4253143850322053,
"grad_norm": 0.7731531067943592,
"learning_rate": 9.947680117227512e-06,
"loss": 0.7858,
"step": 260
},
{
"epoch": 0.4253143850322053,
"eval_loss": 0.7969117164611816,
"eval_runtime": 8.8378,
"eval_samples_per_second": 8.939,
"eval_steps_per_second": 4.526,
"step": 260
},
{
"epoch": 0.44167263061036705,
"grad_norm": 0.8323139648125473,
"learning_rate": 9.933038743727749e-06,
"loss": 0.7882,
"step": 270
},
{
"epoch": 0.4580308761885288,
"grad_norm": 0.8203697180665835,
"learning_rate": 9.916606930010765e-06,
"loss": 0.7794,
"step": 280
},
{
"epoch": 0.4580308761885288,
"eval_loss": 0.7901803255081177,
"eval_runtime": 8.873,
"eval_samples_per_second": 8.903,
"eval_steps_per_second": 4.508,
"step": 280
},
{
"epoch": 0.4743891217666905,
"grad_norm": 0.7847835132710914,
"learning_rate": 9.898390639982715e-06,
"loss": 0.7771,
"step": 290
},
{
"epoch": 0.49074736734485225,
"grad_norm": 0.899857567905345,
"learning_rate": 9.878396485223187e-06,
"loss": 0.7759,
"step": 300
},
{
"epoch": 0.49074736734485225,
"eval_loss": 0.7846674919128418,
"eval_runtime": 8.9104,
"eval_samples_per_second": 8.866,
"eval_steps_per_second": 4.489,
"step": 300
},
{
"epoch": 0.507105612923014,
"grad_norm": 0.8725377228611381,
"learning_rate": 9.856631722585545e-06,
"loss": 0.7694,
"step": 310
},
{
"epoch": 0.5234638585011757,
"grad_norm": 0.8975303080276704,
"learning_rate": 9.833104251563058e-06,
"loss": 0.7702,
"step": 320
},
{
"epoch": 0.5234638585011757,
"eval_loss": 0.7819108366966248,
"eval_runtime": 9.1634,
"eval_samples_per_second": 8.621,
"eval_steps_per_second": 4.365,
"step": 320
},
{
"epoch": 0.5398221040793375,
"grad_norm": 0.8040447653131452,
"learning_rate": 9.807822611421783e-06,
"loss": 0.7737,
"step": 330
},
{
"epoch": 0.5561803496574992,
"grad_norm": 0.7791606284623682,
"learning_rate": 9.780795978101265e-06,
"loss": 0.7687,
"step": 340
},
{
"epoch": 0.5561803496574992,
"eval_loss": 0.7773299217224121,
"eval_runtime": 8.8107,
"eval_samples_per_second": 8.966,
"eval_steps_per_second": 4.54,
"step": 340
},
{
"epoch": 0.572538595235661,
"grad_norm": 0.7841990500752456,
"learning_rate": 9.752034160884126e-06,
"loss": 0.7685,
"step": 350
},
{
"epoch": 0.5888968408138228,
"grad_norm": 0.8046795669671181,
"learning_rate": 9.721547598835807e-06,
"loss": 0.7481,
"step": 360
},
{
"epoch": 0.5888968408138228,
"eval_loss": 0.774566113948822,
"eval_runtime": 9.0394,
"eval_samples_per_second": 8.739,
"eval_steps_per_second": 4.425,
"step": 360
},
{
"epoch": 0.6052550863919844,
"grad_norm": 1.183625080940533,
"learning_rate": 9.689347357015721e-06,
"loss": 0.748,
"step": 370
},
{
"epoch": 0.6216133319701462,
"grad_norm": 0.8040310134429428,
"learning_rate": 9.6554451224612e-06,
"loss": 0.7548,
"step": 380
},
{
"epoch": 0.6216133319701462,
"eval_loss": 0.7700055241584778,
"eval_runtime": 8.8572,
"eval_samples_per_second": 8.919,
"eval_steps_per_second": 4.516,
"step": 380
},
{
"epoch": 0.6379715775483079,
"grad_norm": 0.9470332937426067,
"learning_rate": 9.619853199945699e-06,
"loss": 0.7517,
"step": 390
},
{
"epoch": 0.6543298231264697,
"grad_norm": 0.8950375364388944,
"learning_rate": 9.5825845075128e-06,
"loss": 0.7502,
"step": 400
},
{
"epoch": 0.6543298231264697,
"eval_loss": 0.7674439549446106,
"eval_runtime": 8.9218,
"eval_samples_per_second": 8.855,
"eval_steps_per_second": 4.483,
"step": 400
},
{
"epoch": 0.6706880687046314,
"grad_norm": 0.9169203286801689,
"learning_rate": 9.543652571787623e-06,
"loss": 0.7496,
"step": 410
},
{
"epoch": 0.6870463142827932,
"grad_norm": 0.8093191265332849,
"learning_rate": 9.503071523067346e-06,
"loss": 0.7544,
"step": 420
},
{
"epoch": 0.6870463142827932,
"eval_loss": 0.7653623223304749,
"eval_runtime": 8.8891,
"eval_samples_per_second": 8.887,
"eval_steps_per_second": 4.5,
"step": 420
},
{
"epoch": 0.703404559860955,
"grad_norm": 0.8858043762442335,
"learning_rate": 9.460856090192643e-06,
"loss": 0.7455,
"step": 430
},
{
"epoch": 0.7197628054391166,
"grad_norm": 0.7155973463395218,
"learning_rate": 9.417021595201864e-06,
"loss": 0.7527,
"step": 440
},
{
"epoch": 0.7197628054391166,
"eval_loss": 0.7620683908462524,
"eval_runtime": 8.8784,
"eval_samples_per_second": 8.898,
"eval_steps_per_second": 4.505,
"step": 440
},
{
"epoch": 0.7361210510172784,
"grad_norm": 0.737512769355951,
"learning_rate": 9.371583947769914e-06,
"loss": 0.7457,
"step": 450
},
{
"epoch": 0.7524792965954401,
"grad_norm": 0.8059791945207864,
"learning_rate": 9.324559639433874e-06,
"loss": 0.7498,
"step": 460
},
{
"epoch": 0.7524792965954401,
"eval_loss": 0.7595985531806946,
"eval_runtime": 8.8587,
"eval_samples_per_second": 8.918,
"eval_steps_per_second": 4.515,
"step": 460
},
{
"epoch": 0.7688375421736019,
"grad_norm": 0.8453208453911044,
"learning_rate": 9.2759657376074e-06,
"loss": 0.7399,
"step": 470
},
{
"epoch": 0.7851957877517636,
"grad_norm": 0.7794080969117457,
"learning_rate": 9.225819879386137e-06,
"loss": 0.7422,
"step": 480
},
{
"epoch": 0.7851957877517636,
"eval_loss": 0.757483959197998,
"eval_runtime": 8.8082,
"eval_samples_per_second": 8.969,
"eval_steps_per_second": 4.541,
"step": 480
},
{
"epoch": 0.8015540333299254,
"grad_norm": 0.7170361500782209,
"learning_rate": 9.174140265146356e-06,
"loss": 0.7366,
"step": 490
},
{
"epoch": 0.8179122789080872,
"grad_norm": 0.886247962125953,
"learning_rate": 9.120945651939146e-06,
"loss": 0.7385,
"step": 500
},
{
"epoch": 0.8179122789080872,
"eval_loss": 0.7534403800964355,
"eval_runtime": 8.8411,
"eval_samples_per_second": 8.936,
"eval_steps_per_second": 4.524,
"step": 500
},
{
"epoch": 0.8342705244862488,
"grad_norm": 0.7303115546636174,
"learning_rate": 9.066255346682567e-06,
"loss": 0.7426,
"step": 510
},
{
"epoch": 0.8506287700644106,
"grad_norm": 0.720904332688552,
"learning_rate": 9.010089199154238e-06,
"loss": 0.7396,
"step": 520
},
{
"epoch": 0.8506287700644106,
"eval_loss": 0.7510408163070679,
"eval_runtime": 8.8187,
"eval_samples_per_second": 8.958,
"eval_steps_per_second": 4.536,
"step": 520
},
{
"epoch": 0.8669870156425723,
"grad_norm": 0.7269189612270449,
"learning_rate": 8.952467594786867e-06,
"loss": 0.7372,
"step": 530
},
{
"epoch": 0.8833452612207341,
"grad_norm": 0.7411165689482159,
"learning_rate": 8.8934114472694e-06,
"loss": 0.7455,
"step": 540
},
{
"epoch": 0.8833452612207341,
"eval_loss": 0.7494012117385864,
"eval_runtime": 8.8703,
"eval_samples_per_second": 8.906,
"eval_steps_per_second": 4.509,
"step": 540
},
{
"epoch": 0.8997035067988958,
"grad_norm": 0.7222933451317762,
"learning_rate": 8.83294219095642e-06,
"loss": 0.7396,
"step": 550
},
{
"epoch": 0.9160617523770576,
"grad_norm": 0.7405492243894416,
"learning_rate": 8.771081773088568e-06,
"loss": 0.7275,
"step": 560
},
{
"epoch": 0.9160617523770576,
"eval_loss": 0.747172474861145,
"eval_runtime": 8.8684,
"eval_samples_per_second": 8.908,
"eval_steps_per_second": 4.51,
"step": 560
},
{
"epoch": 0.9324199979552193,
"grad_norm": 0.7343527467764037,
"learning_rate": 8.707852645826824e-06,
"loss": 0.7227,
"step": 570
},
{
"epoch": 0.948778243533381,
"grad_norm": 0.8149960345815405,
"learning_rate": 8.643277758103527e-06,
"loss": 0.74,
"step": 580
},
{
"epoch": 0.948778243533381,
"eval_loss": 0.745248019695282,
"eval_runtime": 8.8473,
"eval_samples_per_second": 8.929,
"eval_steps_per_second": 4.521,
"step": 580
},
{
"epoch": 0.9651364891115428,
"grad_norm": 0.7210978276382376,
"learning_rate": 8.577380547293089e-06,
"loss": 0.7387,
"step": 590
},
{
"epoch": 0.9814947346897045,
"grad_norm": 0.7095208468673599,
"learning_rate": 8.510184930705413e-06,
"loss": 0.7339,
"step": 600
},
{
"epoch": 0.9814947346897045,
"eval_loss": 0.7437242269515991,
"eval_runtime": 8.82,
"eval_samples_per_second": 8.957,
"eval_steps_per_second": 4.535,
"step": 600
},
{
"epoch": 0.9978529802678663,
"grad_norm": 0.7505905361093098,
"learning_rate": 8.44171529690516e-06,
"loss": 0.7248,
"step": 610
},
{
"epoch": 1.0143134648808916,
"grad_norm": 0.6979646401902959,
"learning_rate": 8.371996496859938e-06,
"loss": 0.7404,
"step": 620
},
{
"epoch": 1.0143134648808916,
"eval_loss": 0.7440437078475952,
"eval_runtime": 8.7724,
"eval_samples_per_second": 9.005,
"eval_steps_per_second": 4.56,
"step": 620
},
{
"epoch": 1.0306717104590533,
"grad_norm": 0.7236767629054202,
"learning_rate": 8.30105383492067e-06,
"loss": 0.6848,
"step": 630
},
{
"epoch": 1.047029956037215,
"grad_norm": 0.6872848093215694,
"learning_rate": 8.228913059637414e-06,
"loss": 0.6877,
"step": 640
},
{
"epoch": 1.047029956037215,
"eval_loss": 0.7421544790267944,
"eval_runtime": 8.8557,
"eval_samples_per_second": 8.921,
"eval_steps_per_second": 4.517,
"step": 640
},
{
"epoch": 1.0633882016153768,
"grad_norm": 0.7171174222919368,
"learning_rate": 8.155600354413971e-06,
"loss": 0.6752,
"step": 650
},
{
"epoch": 1.0797464471935385,
"grad_norm": 0.6937193535340093,
"learning_rate": 8.081142328004638e-06,
"loss": 0.6785,
"step": 660
},
{
"epoch": 1.0797464471935385,
"eval_loss": 0.7413665056228638,
"eval_runtime": 8.7177,
"eval_samples_per_second": 9.062,
"eval_steps_per_second": 4.588,
"step": 660
},
{
"epoch": 1.0961046927717002,
"grad_norm": 0.7347355982975387,
"learning_rate": 8.005566004856593e-06,
"loss": 0.6746,
"step": 670
},
{
"epoch": 1.1124629383498619,
"grad_norm": 0.8465986634067116,
"learning_rate": 7.928898815301418e-06,
"loss": 0.6723,
"step": 680
},
{
"epoch": 1.1124629383498619,
"eval_loss": 0.7399634122848511,
"eval_runtime": 8.9138,
"eval_samples_per_second": 8.863,
"eval_steps_per_second": 4.487,
"step": 680
},
{
"epoch": 1.1288211839280238,
"grad_norm": 0.7674101027640446,
"learning_rate": 7.851168585599285e-06,
"loss": 0.6756,
"step": 690
},
{
"epoch": 1.1451794295061855,
"grad_norm": 0.7525909134460642,
"learning_rate": 7.77240352783945e-06,
"loss": 0.6807,
"step": 700
},
{
"epoch": 1.1451794295061855,
"eval_loss": 0.7387439608573914,
"eval_runtime": 8.8038,
"eval_samples_per_second": 8.973,
"eval_steps_per_second": 4.544,
"step": 700
},
{
"epoch": 1.1615376750843471,
"grad_norm": 0.7798312578850733,
"learning_rate": 7.692632229700718e-06,
"loss": 0.6779,
"step": 710
},
{
"epoch": 1.177895920662509,
"grad_norm": 0.7474295968254813,
"learning_rate": 7.611883644075573e-06,
"loss": 0.6823,
"step": 720
},
{
"epoch": 1.177895920662509,
"eval_loss": 0.7377986907958984,
"eval_runtime": 8.8162,
"eval_samples_per_second": 8.961,
"eval_steps_per_second": 4.537,
"step": 720
},
{
"epoch": 1.1942541662406707,
"grad_norm": 0.7587723123453163,
"learning_rate": 7.5301870785617635e-06,
"loss": 0.675,
"step": 730
},
{
"epoch": 1.2106124118188324,
"grad_norm": 0.7440613541332155,
"learning_rate": 7.447572184825149e-06,
"loss": 0.6792,
"step": 740
},
{
"epoch": 1.2106124118188324,
"eval_loss": 0.7359431982040405,
"eval_runtime": 8.8106,
"eval_samples_per_second": 8.966,
"eval_steps_per_second": 4.54,
"step": 740
},
{
"epoch": 1.2269706573969943,
"grad_norm": 0.7463811292658835,
"learning_rate": 7.3640689478376695e-06,
"loss": 0.682,
"step": 750
},
{
"epoch": 1.243328902975156,
"grad_norm": 0.7709021578765768,
"learning_rate": 7.2797076749943214e-06,
"loss": 0.6799,
"step": 760
},
{
"epoch": 1.243328902975156,
"eval_loss": 0.7348581552505493,
"eval_runtime": 8.8852,
"eval_samples_per_second": 8.891,
"eval_steps_per_second": 4.502,
"step": 760
},
{
"epoch": 1.2596871485533176,
"grad_norm": 0.7343520193643218,
"learning_rate": 7.194518985113149e-06,
"loss": 0.6765,
"step": 770
},
{
"epoch": 1.2760453941314793,
"grad_norm": 0.7190112773845664,
"learning_rate": 7.108533797322166e-06,
"loss": 0.6804,
"step": 780
},
{
"epoch": 1.2760453941314793,
"eval_loss": 0.7342889904975891,
"eval_runtime": 8.8721,
"eval_samples_per_second": 8.904,
"eval_steps_per_second": 4.508,
"step": 780
},
{
"epoch": 1.292403639709641,
"grad_norm": 0.7133811365918709,
"learning_rate": 7.021783319837298e-06,
"loss": 0.6767,
"step": 790
},
{
"epoch": 1.308761885287803,
"grad_norm": 0.7307976869448247,
"learning_rate": 6.934299038635414e-06,
"loss": 0.6812,
"step": 800
},
{
"epoch": 1.308761885287803,
"eval_loss": 0.7325922846794128,
"eval_runtime": 8.8154,
"eval_samples_per_second": 8.962,
"eval_steps_per_second": 4.538,
"step": 800
},
{
"epoch": 1.3251201308659646,
"grad_norm": 0.7219505844302012,
"learning_rate": 6.8461127060265135e-06,
"loss": 0.679,
"step": 810
},
{
"epoch": 1.3414783764441265,
"grad_norm": 0.7057903127014294,
"learning_rate": 6.7572563291292724e-06,
"loss": 0.6714,
"step": 820
},
{
"epoch": 1.3414783764441265,
"eval_loss": 0.7310522794723511,
"eval_runtime": 8.9723,
"eval_samples_per_second": 8.805,
"eval_steps_per_second": 4.458,
"step": 820
},
{
"epoch": 1.3578366220222882,
"grad_norm": 0.7481237758190087,
"learning_rate": 6.667762158254104e-06,
"loss": 0.671,
"step": 830
},
{
"epoch": 1.3741948676004498,
"grad_norm": 0.6771887136482816,
"learning_rate": 6.57766267519794e-06,
"loss": 0.6764,
"step": 840
},
{
"epoch": 1.3741948676004498,
"eval_loss": 0.7305134534835815,
"eval_runtime": 8.7821,
"eval_samples_per_second": 8.996,
"eval_steps_per_second": 4.555,
"step": 840
},
{
"epoch": 1.3905531131786115,
"grad_norm": 0.7720857727869926,
"learning_rate": 6.486990581455011e-06,
"loss": 0.6819,
"step": 850
},
{
"epoch": 1.4069113587567732,
"grad_norm": 0.7488122619323809,
"learning_rate": 6.395778786347878e-06,
"loss": 0.6801,
"step": 860
},
{
"epoch": 1.4069113587567732,
"eval_loss": 0.7291988134384155,
"eval_runtime": 8.8848,
"eval_samples_per_second": 8.892,
"eval_steps_per_second": 4.502,
"step": 860
},
{
"epoch": 1.423269604334935,
"grad_norm": 0.7831777981437086,
"learning_rate": 6.304060395083031e-06,
"loss": 0.6687,
"step": 870
},
{
"epoch": 1.4396278499130968,
"grad_norm": 0.7391424956481155,
"learning_rate": 6.211868696735405e-06,
"loss": 0.6693,
"step": 880
},
{
"epoch": 1.4396278499130968,
"eval_loss": 0.727660059928894,
"eval_runtime": 8.8922,
"eval_samples_per_second": 8.884,
"eval_steps_per_second": 4.498,
"step": 880
},
{
"epoch": 1.4559860954912587,
"grad_norm": 0.6633367855169299,
"learning_rate": 6.119237152166139e-06,
"loss": 0.6657,
"step": 890
},
{
"epoch": 1.4723443410694204,
"grad_norm": 0.6920250318389223,
"learning_rate": 6.026199381878011e-06,
"loss": 0.6726,
"step": 900
},
{
"epoch": 1.4723443410694204,
"eval_loss": 0.7265572547912598,
"eval_runtime": 8.8512,
"eval_samples_per_second": 8.925,
"eval_steps_per_second": 4.519,
"step": 900
},
{
"epoch": 1.488702586647582,
"grad_norm": 0.7081219959408172,
"learning_rate": 5.932789153812911e-06,
"loss": 0.6672,
"step": 910
},
{
"epoch": 1.5050608322257437,
"grad_norm": 0.7707103364304354,
"learning_rate": 5.839040371095814e-06,
"loss": 0.6762,
"step": 920
},
{
"epoch": 1.5050608322257437,
"eval_loss": 0.7252368330955505,
"eval_runtime": 8.8447,
"eval_samples_per_second": 8.932,
"eval_steps_per_second": 4.522,
"step": 920
},
{
"epoch": 1.5214190778039054,
"grad_norm": 0.7285700753630275,
"learning_rate": 5.74498705972968e-06,
"loss": 0.6716,
"step": 930
},
{
"epoch": 1.5377773233820673,
"grad_norm": 0.6719863494102282,
"learning_rate": 5.650663356245756e-06,
"loss": 0.6666,
"step": 940
},
{
"epoch": 1.5377773233820673,
"eval_loss": 0.7246092557907104,
"eval_runtime": 8.7654,
"eval_samples_per_second": 9.013,
"eval_steps_per_second": 4.563,
"step": 940
},
{
"epoch": 1.554135568960229,
"grad_norm": 0.697927897632164,
"learning_rate": 5.556103495313774e-06,
"loss": 0.6679,
"step": 950
},
{
"epoch": 1.5704938145383909,
"grad_norm": 0.6843159800619036,
"learning_rate": 5.46134179731651e-06,
"loss": 0.6713,
"step": 960
},
{
"epoch": 1.5704938145383909,
"eval_loss": 0.7233731150627136,
"eval_runtime": 8.9372,
"eval_samples_per_second": 8.839,
"eval_steps_per_second": 4.476,
"step": 960
},
{
"epoch": 1.5868520601165526,
"grad_norm": 0.6766351504145809,
"learning_rate": 5.366412655893256e-06,
"loss": 0.6689,
"step": 970
},
{
"epoch": 1.6032103056947142,
"grad_norm": 0.7167144088999912,
"learning_rate": 5.271350525456679e-06,
"loss": 0.6659,
"step": 980
},
{
"epoch": 1.6032103056947142,
"eval_loss": 0.7226489186286926,
"eval_runtime": 8.789,
"eval_samples_per_second": 8.989,
"eval_steps_per_second": 4.551,
"step": 980
},
{
"epoch": 1.619568551272876,
"grad_norm": 0.7126879315212699,
"learning_rate": 5.176189908687646e-06,
"loss": 0.6695,
"step": 990
},
{
"epoch": 1.6359267968510376,
"grad_norm": 0.6583586257751949,
"learning_rate": 5.080965344012509e-06,
"loss": 0.6694,
"step": 1000
},
{
"epoch": 1.6359267968510376,
"eval_loss": 0.7216107249259949,
"eval_runtime": 8.8487,
"eval_samples_per_second": 8.928,
"eval_steps_per_second": 4.52,
"step": 1000
},
{
"epoch": 1.6522850424291995,
"grad_norm": 0.6561297313170917,
"learning_rate": 4.985711393067437e-06,
"loss": 0.6674,
"step": 1010
},
{
"epoch": 1.6686432880073612,
"grad_norm": 0.6626437713636341,
"learning_rate": 4.890462628154309e-06,
"loss": 0.6667,
"step": 1020
},
{
"epoch": 1.6686432880073612,
"eval_loss": 0.7210881114006042,
"eval_runtime": 8.7877,
"eval_samples_per_second": 8.99,
"eval_steps_per_second": 4.552,
"step": 1020
},
{
"epoch": 1.685001533585523,
"grad_norm": 0.7166433188372661,
"learning_rate": 4.795253619692737e-06,
"loss": 0.663,
"step": 1030
},
{
"epoch": 1.7013597791636847,
"grad_norm": 0.741234806048152,
"learning_rate": 4.700118923672779e-06,
"loss": 0.6739,
"step": 1040
},
{
"epoch": 1.7013597791636847,
"eval_loss": 0.7206884622573853,
"eval_runtime": 8.8528,
"eval_samples_per_second": 8.924,
"eval_steps_per_second": 4.518,
"step": 1040
},
{
"epoch": 1.7177180247418464,
"grad_norm": 0.7016761274840421,
"learning_rate": 4.605093069112892e-06,
"loss": 0.6711,
"step": 1050
},
{
"epoch": 1.734076270320008,
"grad_norm": 0.6970244053157433,
"learning_rate": 4.5102105455276525e-06,
"loss": 0.6745,
"step": 1060
},
{
"epoch": 1.734076270320008,
"eval_loss": 0.7203969955444336,
"eval_runtime": 8.8182,
"eval_samples_per_second": 8.959,
"eval_steps_per_second": 4.536,
"step": 1060
},
{
"epoch": 1.7504345158981698,
"grad_norm": 0.7335723892483486,
"learning_rate": 4.415505790409847e-06,
"loss": 0.6686,
"step": 1070
},
{
"epoch": 1.7667927614763317,
"grad_norm": 0.6956225175618133,
"learning_rate": 4.321013176731414e-06,
"loss": 0.6665,
"step": 1080
},
{
"epoch": 1.7667927614763317,
"eval_loss": 0.7191005945205688,
"eval_runtime": 8.9226,
"eval_samples_per_second": 8.854,
"eval_steps_per_second": 4.483,
"step": 1080
},
{
"epoch": 1.7831510070544934,
"grad_norm": 0.698587154786482,
"learning_rate": 4.226767000467826e-06,
"loss": 0.6666,
"step": 1090
},
{
"epoch": 1.7995092526326553,
"grad_norm": 0.7438773573796561,
"learning_rate": 4.1328014681504105e-06,
"loss": 0.6626,
"step": 1100
},
{
"epoch": 1.7995092526326553,
"eval_loss": 0.7183343768119812,
"eval_runtime": 8.8586,
"eval_samples_per_second": 8.918,
"eval_steps_per_second": 4.515,
"step": 1100
},
{
"epoch": 1.815867498210817,
"grad_norm": 0.6947292310543136,
"learning_rate": 4.039150684451117e-06,
"loss": 0.66,
"step": 1110
},
{
"epoch": 1.8322257437889786,
"grad_norm": 0.6477593004868593,
"learning_rate": 3.945848639804287e-06,
"loss": 0.6583,
"step": 1120
},
{
"epoch": 1.8322257437889786,
"eval_loss": 0.7178105711936951,
"eval_runtime": 8.8059,
"eval_samples_per_second": 8.971,
"eval_steps_per_second": 4.542,
"step": 1120
},
{
"epoch": 1.8485839893671403,
"grad_norm": 0.6753153859283428,
"learning_rate": 3.852929198069856e-06,
"loss": 0.6551,
"step": 1130
},
{
"epoch": 1.864942234945302,
"grad_norm": 0.734130420389638,
"learning_rate": 3.7604260842425146e-06,
"loss": 0.665,
"step": 1140
},
{
"epoch": 1.864942234945302,
"eval_loss": 0.7165009379386902,
"eval_runtime": 8.8639,
"eval_samples_per_second": 8.913,
"eval_steps_per_second": 4.513,
"step": 1140
},
{
"epoch": 1.8813004805234639,
"grad_norm": 0.6466537300434777,
"learning_rate": 3.668372872211266e-06,
"loss": 0.6601,
"step": 1150
},
{
"epoch": 1.8976587261016256,
"grad_norm": 0.6650377275740064,
"learning_rate": 3.5768029725738157e-06,
"loss": 0.6609,
"step": 1160
},
{
"epoch": 1.8976587261016256,
"eval_loss": 0.7164144515991211,
"eval_runtime": 8.8323,
"eval_samples_per_second": 8.944,
"eval_steps_per_second": 4.529,
"step": 1160
},
{
"epoch": 1.9140169716797875,
"grad_norm": 0.6705397336213909,
"learning_rate": 3.4857496205102475e-06,
"loss": 0.6558,
"step": 1170
},
{
"epoch": 1.9303752172579491,
"grad_norm": 0.6821158056706338,
"learning_rate": 3.3952458637203475e-06,
"loss": 0.6715,
"step": 1180
},
{
"epoch": 1.9303752172579491,
"eval_loss": 0.715703010559082,
"eval_runtime": 8.8492,
"eval_samples_per_second": 8.927,
"eval_steps_per_second": 4.52,
"step": 1180
},
{
"epoch": 1.9467334628361108,
"grad_norm": 0.6630764255722235,
"learning_rate": 3.3053245504289894e-06,
"loss": 0.656,
"step": 1190
},
{
"epoch": 1.9630917084142725,
"grad_norm": 0.669639110840792,
"learning_rate": 3.216018317463914e-06,
"loss": 0.6755,
"step": 1200
},
{
"epoch": 1.9630917084142725,
"eval_loss": 0.7149642109870911,
"eval_runtime": 8.6929,
"eval_samples_per_second": 9.088,
"eval_steps_per_second": 4.601,
"step": 1200
},
{
"epoch": 1.9794499539924342,
"grad_norm": 0.677640160297301,
"learning_rate": 3.1273595784102244e-06,
"loss": 0.6656,
"step": 1210
},
{
"epoch": 1.995808199570596,
"grad_norm": 0.6478481336084418,
"learning_rate": 3.0393805118459257e-06,
"loss": 0.6654,
"step": 1220
},
{
"epoch": 1.995808199570596,
"eval_loss": 0.7144020795822144,
"eval_runtime": 8.7728,
"eval_samples_per_second": 9.005,
"eval_steps_per_second": 4.56,
"step": 1220
},
{
"epoch": 2.0122686841836215,
"grad_norm": 0.7160988089736929,
"learning_rate": 2.952113049662744e-06,
"loss": 0.6911,
"step": 1230
},
{
"epoch": 2.028626929761783,
"grad_norm": 0.6658394367051226,
"learning_rate": 2.86558886547649e-06,
"loss": 0.6207,
"step": 1240
},
{
"epoch": 2.028626929761783,
"eval_loss": 0.7192620038986206,
"eval_runtime": 8.7807,
"eval_samples_per_second": 8.997,
"eval_steps_per_second": 4.555,
"step": 1240
},
{
"epoch": 2.044985175339945,
"grad_norm": 0.654677579170524,
"learning_rate": 2.7798393631311565e-06,
"loss": 0.6216,
"step": 1250
},
{
"epoch": 2.0613434209181065,
"grad_norm": 0.6845236856939948,
"learning_rate": 2.6948956653009416e-06,
"loss": 0.618,
"step": 1260
},
{
"epoch": 2.0613434209181065,
"eval_loss": 0.7191548347473145,
"eval_runtime": 8.8502,
"eval_samples_per_second": 8.926,
"eval_steps_per_second": 4.52,
"step": 1260
},
{
"epoch": 2.077701666496268,
"grad_norm": 0.6800346983315418,
"learning_rate": 2.6107886021942984e-06,
"loss": 0.6219,
"step": 1270
},
{
"epoch": 2.09405991207443,
"grad_norm": 0.6473993386866002,
"learning_rate": 2.527548700364164e-06,
"loss": 0.6182,
"step": 1280
},
{
"epoch": 2.09405991207443,
"eval_loss": 0.7187679409980774,
"eval_runtime": 8.8369,
"eval_samples_per_second": 8.94,
"eval_steps_per_second": 4.526,
"step": 1280
},
{
"epoch": 2.1104181576525916,
"grad_norm": 0.680332157190334,
"learning_rate": 2.4452061716283764e-06,
"loss": 0.6247,
"step": 1290
},
{
"epoch": 2.1267764032307537,
"grad_norm": 0.6713647024120337,
"learning_rate": 2.3637909021043332e-06,
"loss": 0.6188,
"step": 1300
},
{
"epoch": 2.1267764032307537,
"eval_loss": 0.7195940017700195,
"eval_runtime": 8.934,
"eval_samples_per_second": 8.843,
"eval_steps_per_second": 4.477,
"step": 1300
},
{
"epoch": 2.1431346488089154,
"grad_norm": 0.6905966915530017,
"learning_rate": 2.2833324413618695e-06,
"loss": 0.6213,
"step": 1310
},
{
"epoch": 2.159492894387077,
"grad_norm": 0.6961837258954594,
"learning_rate": 2.2038599916982775e-06,
"loss": 0.6173,
"step": 1320
},
{
"epoch": 2.159492894387077,
"eval_loss": 0.718852162361145,
"eval_runtime": 8.7975,
"eval_samples_per_second": 8.98,
"eval_steps_per_second": 4.547,
"step": 1320
},
{
"epoch": 2.1758511399652387,
"grad_norm": 0.7109159972241073,
"learning_rate": 2.1254023975393585e-06,
"loss": 0.6193,
"step": 1330
},
{
"epoch": 2.1922093855434004,
"grad_norm": 0.6856177780539675,
"learning_rate": 2.0479881349703885e-06,
"loss": 0.6206,
"step": 1340
},
{
"epoch": 2.1922093855434004,
"eval_loss": 0.719401478767395,
"eval_runtime": 9.6891,
"eval_samples_per_second": 8.154,
"eval_steps_per_second": 4.128,
"step": 1340
},
{
"epoch": 2.208567631121562,
"grad_norm": 0.6846382708470592,
"learning_rate": 1.9716453014007493e-06,
"loss": 0.6172,
"step": 1350
},
{
"epoch": 2.2249258766997237,
"grad_norm": 0.6821924271490651,
"learning_rate": 1.8964016053660167e-06,
"loss": 0.6206,
"step": 1360
},
{
"epoch": 2.2249258766997237,
"eval_loss": 0.7185901403427124,
"eval_runtime": 10.538,
"eval_samples_per_second": 7.497,
"eval_steps_per_second": 3.796,
"step": 1360
},
{
"epoch": 2.241284122277886,
"grad_norm": 0.7020054137197129,
"learning_rate": 1.822284356471179e-06,
"loss": 0.628,
"step": 1370
},
{
"epoch": 2.2576423678560475,
"grad_norm": 0.6827467082055568,
"learning_rate": 1.7493204554786453e-06,
"loss": 0.6192,
"step": 1380
},
{
"epoch": 2.2576423678560475,
"eval_loss": 0.7185772657394409,
"eval_runtime": 8.7402,
"eval_samples_per_second": 9.039,
"eval_steps_per_second": 4.577,
"step": 1380
},
{
"epoch": 2.2740006134342092,
"grad_norm": 0.6787057813877333,
"learning_rate": 1.6775363845446552e-06,
"loss": 0.6215,
"step": 1390
},
{
"epoch": 2.290358859012371,
"grad_norm": 0.6965105305161102,
"learning_rate": 1.6069581976076059e-06,
"loss": 0.6168,
"step": 1400
},
{
"epoch": 2.290358859012371,
"eval_loss": 0.7182484269142151,
"eval_runtime": 8.8061,
"eval_samples_per_second": 8.971,
"eval_steps_per_second": 4.542,
"step": 1400
},
{
"epoch": 2.3067171045905326,
"grad_norm": 0.657899834231394,
"learning_rate": 1.537611510931819e-06,
"loss": 0.622,
"step": 1410
},
{
"epoch": 2.3230753501686943,
"grad_norm": 0.6763933278225029,
"learning_rate": 1.4695214938101394e-06,
"loss": 0.6247,
"step": 1420
},
{
"epoch": 2.3230753501686943,
"eval_loss": 0.7175112962722778,
"eval_runtime": 8.833,
"eval_samples_per_second": 8.944,
"eval_steps_per_second": 4.528,
"step": 1420
},
{
"epoch": 2.339433595746856,
"grad_norm": 0.6445654002256987,
"learning_rate": 1.4027128594287743e-06,
"loss": 0.6124,
"step": 1430
},
{
"epoch": 2.355791841325018,
"grad_norm": 0.6483502135699247,
"learning_rate": 1.3372098558976753e-06,
"loss": 0.6152,
"step": 1440
},
{
"epoch": 2.355791841325018,
"eval_loss": 0.7176142930984497,
"eval_runtime": 8.7696,
"eval_samples_per_second": 9.008,
"eval_steps_per_second": 4.561,
"step": 1440
},
{
"epoch": 2.3721500869031797,
"grad_norm": 0.6748060520638486,
"learning_rate": 1.2730362574497146e-06,
"loss": 0.6191,
"step": 1450
},
{
"epoch": 2.3885083324813414,
"grad_norm": 0.6411389753036478,
"learning_rate": 1.2102153558118635e-06,
"loss": 0.6118,
"step": 1460
},
{
"epoch": 2.3885083324813414,
"eval_loss": 0.7174015641212463,
"eval_runtime": 8.8824,
"eval_samples_per_second": 8.894,
"eval_steps_per_second": 4.503,
"step": 1460
},
{
"epoch": 2.404866578059503,
"grad_norm": 0.6147421703730933,
"learning_rate": 1.1487699517514828e-06,
"loss": 0.6213,
"step": 1470
},
{
"epoch": 2.421224823637665,
"grad_norm": 0.6564011388418958,
"learning_rate": 1.088722346800813e-06,
"loss": 0.6192,
"step": 1480
},
{
"epoch": 2.421224823637665,
"eval_loss": 0.7174366116523743,
"eval_runtime": 8.8213,
"eval_samples_per_second": 8.956,
"eval_steps_per_second": 4.534,
"step": 1480
},
{
"epoch": 2.4375830692158265,
"grad_norm": 0.6637159235119273,
"learning_rate": 1.0300943351626642e-06,
"loss": 0.6172,
"step": 1490
},
{
"epoch": 2.4539413147939886,
"grad_norm": 0.6399937055494419,
"learning_rate": 9.729071958002356e-07,
"loss": 0.6125,
"step": 1500
},
{
"epoch": 2.4539413147939886,
"eval_loss": 0.7172605395317078,
"eval_runtime": 8.8588,
"eval_samples_per_second": 8.918,
"eval_steps_per_second": 4.515,
"step": 1500
},
{
"epoch": 2.4702995603721503,
"grad_norm": 0.6548848015845544,
"learning_rate": 9.171816847139447e-07,
"loss": 0.6197,
"step": 1510
},
{
"epoch": 2.486657805950312,
"grad_norm": 0.652681143911981,
"learning_rate": 8.62938027408064e-07,
"loss": 0.6067,
"step": 1520
},
{
"epoch": 2.486657805950312,
"eval_loss": 0.7170479893684387,
"eval_runtime": 8.821,
"eval_samples_per_second": 8.956,
"eval_steps_per_second": 4.535,
"step": 1520
},
{
"epoch": 2.5030160515284736,
"grad_norm": 0.6223909947895746,
"learning_rate": 8.10195911549892e-07,
"loss": 0.6229,
"step": 1530
},
{
"epoch": 2.5193742971066353,
"grad_norm": 0.6675446368924337,
"learning_rate": 7.589744798241472e-07,
"loss": 0.6196,
"step": 1540
},
{
"epoch": 2.5193742971066353,
"eval_loss": 0.7164150476455688,
"eval_runtime": 8.8151,
"eval_samples_per_second": 8.962,
"eval_steps_per_second": 4.538,
"step": 1540
},
{
"epoch": 2.535732542684797,
"grad_norm": 0.6748594529596444,
"learning_rate": 7.092923229851506e-07,
"loss": 0.6259,
"step": 1550
},
{
"epoch": 2.5520907882629587,
"grad_norm": 0.6436382148035624,
"learning_rate": 6.611674731093376e-07,
"loss": 0.6147,
"step": 1560
},
{
"epoch": 2.5520907882629587,
"eval_loss": 0.7163916826248169,
"eval_runtime": 8.7789,
"eval_samples_per_second": 8.999,
"eval_steps_per_second": 4.556,
"step": 1560
},
{
"epoch": 2.5684490338411203,
"grad_norm": 0.6954982109692219,
"learning_rate": 6.146173970505387e-07,
"loss": 0.6221,
"step": 1570
},
{
"epoch": 2.584807279419282,
"grad_norm": 0.6636316314884414,
"learning_rate": 5.696589901004001e-07,
"loss": 0.6166,
"step": 1580
},
{
"epoch": 2.584807279419282,
"eval_loss": 0.7162200808525085,
"eval_runtime": 8.8522,
"eval_samples_per_second": 8.924,
"eval_steps_per_second": 4.519,
"step": 1580
},
{
"epoch": 2.601165524997444,
"grad_norm": 0.6348189748255966,
"learning_rate": 5.263085698562675e-07,
"loss": 0.6162,
"step": 1590
},
{
"epoch": 2.617523770575606,
"grad_norm": 0.6656187164775476,
"learning_rate": 4.845818702987287e-07,
"loss": 0.6235,
"step": 1600
},
{
"epoch": 2.617523770575606,
"eval_loss": 0.7161623239517212,
"eval_runtime": 8.9168,
"eval_samples_per_second": 8.86,
"eval_steps_per_second": 4.486,
"step": 1600
},
{
"epoch": 2.6338820161537675,
"grad_norm": 0.644537564705373,
"learning_rate": 4.4449403608098965e-07,
"loss": 0.6126,
"step": 1610
},
{
"epoch": 2.650240261731929,
"grad_norm": 0.6394762326342863,
"learning_rate": 4.0605961703213845e-07,
"loss": 0.6112,
"step": 1620
},
{
"epoch": 2.650240261731929,
"eval_loss": 0.7161468267440796,
"eval_runtime": 8.9454,
"eval_samples_per_second": 8.831,
"eval_steps_per_second": 4.472,
"step": 1620
},
{
"epoch": 2.666598507310091,
"grad_norm": 0.6651958412760407,
"learning_rate": 3.692925628763033e-07,
"loss": 0.6218,
"step": 1630
},
{
"epoch": 2.682956752888253,
"grad_norm": 0.6547622124607134,
"learning_rate": 3.3420621816961874e-07,
"loss": 0.6186,
"step": 1640
},
{
"epoch": 2.682956752888253,
"eval_loss": 0.7159026265144348,
"eval_runtime": 8.8156,
"eval_samples_per_second": 8.961,
"eval_steps_per_second": 4.537,
"step": 1640
},
{
"epoch": 2.6993149984664147,
"grad_norm": 0.6391386777884149,
"learning_rate": 3.0081331745683395e-07,
"loss": 0.613,
"step": 1650
},
{
"epoch": 2.7156732440445763,
"grad_norm": 0.6368321437722378,
"learning_rate": 2.691259806493296e-07,
"loss": 0.6164,
"step": 1660
},
{
"epoch": 2.7156732440445763,
"eval_loss": 0.7158184051513672,
"eval_runtime": 8.7951,
"eval_samples_per_second": 8.982,
"eval_steps_per_second": 4.548,
"step": 1660
},
{
"epoch": 2.732031489622738,
"grad_norm": 0.6365243949451423,
"learning_rate": 2.391557086262086e-07,
"loss": 0.6228,
"step": 1670
},
{
"epoch": 2.7483897352008997,
"grad_norm": 0.6321404050434235,
"learning_rate": 2.109133790600648e-07,
"loss": 0.6147,
"step": 1680
},
{
"epoch": 2.7483897352008997,
"eval_loss": 0.7158446311950684,
"eval_runtime": 8.8513,
"eval_samples_per_second": 8.925,
"eval_steps_per_second": 4.519,
"step": 1680
},
{
"epoch": 2.7647479807790614,
"grad_norm": 0.6903153602015651,
"learning_rate": 1.8440924246894753e-07,
"loss": 0.6158,
"step": 1690
},
{
"epoch": 2.781106226357223,
"grad_norm": 0.6670191692486903,
"learning_rate": 1.5965291849594944e-07,
"loss": 0.6098,
"step": 1700
},
{
"epoch": 2.781106226357223,
"eval_loss": 0.7158127427101135,
"eval_runtime": 8.8596,
"eval_samples_per_second": 8.917,
"eval_steps_per_second": 4.515,
"step": 1700
},
{
"epoch": 2.7974644719353847,
"grad_norm": 0.6357999130773923,
"learning_rate": 1.3665339241776765e-07,
"loss": 0.6212,
"step": 1710
},
{
"epoch": 2.8138227175135464,
"grad_norm": 0.6630012411471573,
"learning_rate": 1.1541901188351134e-07,
"loss": 0.6126,
"step": 1720
},
{
"epoch": 2.8138227175135464,
"eval_loss": 0.7158257365226746,
"eval_runtime": 9.6888,
"eval_samples_per_second": 8.154,
"eval_steps_per_second": 4.128,
"step": 1720
},
{
"epoch": 2.8301809630917085,
"grad_norm": 0.6453855721831139,
"learning_rate": 9.59574838849281e-08,
"loss": 0.6162,
"step": 1730
},
{
"epoch": 2.84653920866987,
"grad_norm": 0.647793683675027,
"learning_rate": 7.827587195916697e-08,
"loss": 0.6179,
"step": 1740
},
{
"epoch": 2.84653920866987,
"eval_loss": 0.7156850695610046,
"eval_runtime": 8.8101,
"eval_samples_per_second": 8.967,
"eval_steps_per_second": 4.54,
"step": 1740
},
{
"epoch": 2.862897454248032,
"grad_norm": 0.627906665508174,
"learning_rate": 6.238059362507043e-08,
"loss": 0.6157,
"step": 1750
},
{
"epoch": 2.8792556998261936,
"grad_norm": 0.6991824017680971,
"learning_rate": 4.827741805395025e-08,
"loss": 0.6229,
"step": 1760
},
{
"epoch": 2.8792556998261936,
"eval_loss": 0.7156457901000977,
"eval_runtime": 8.8917,
"eval_samples_per_second": 8.885,
"eval_steps_per_second": 4.499,
"step": 1760
},
{
"epoch": 2.8956139454043552,
"grad_norm": 0.6604597808333147,
"learning_rate": 3.59714639756692e-08,
"loss": 0.6198,
"step": 1770
},
{
"epoch": 2.9119721909825174,
"grad_norm": 0.6412019833547566,
"learning_rate": 2.5467197820805977e-08,
"loss": 0.624,
"step": 1780
},
{
"epoch": 2.9119721909825174,
"eval_loss": 0.7153981328010559,
"eval_runtime": 8.897,
"eval_samples_per_second": 8.879,
"eval_steps_per_second": 4.496,
"step": 1780
},
{
"epoch": 2.928330436560679,
"grad_norm": 0.6539771563156503,
"learning_rate": 1.6768432099570907e-08,
"loss": 0.6181,
"step": 1790
},
{
"epoch": 2.9446886821388407,
"grad_norm": 0.6484588822526755,
"learning_rate": 9.878324018058327e-09,
"loss": 0.6155,
"step": 1800
},
{
"epoch": 2.9446886821388407,
"eval_loss": 0.7157188653945923,
"eval_runtime": 8.8628,
"eval_samples_per_second": 8.914,
"eval_steps_per_second": 4.513,
"step": 1800
},
{
"epoch": 2.9610469277170024,
"grad_norm": 0.6119368873622615,
"learning_rate": 4.799374332344342e-09,
"loss": 0.6189,
"step": 1810
},
{
"epoch": 2.977405173295164,
"grad_norm": 0.6333710387576432,
"learning_rate": 1.533426440839536e-09,
"loss": 0.6211,
"step": 1820
},
{
"epoch": 2.977405173295164,
"eval_loss": 0.7156268358230591,
"eval_runtime": 8.9113,
"eval_samples_per_second": 8.865,
"eval_steps_per_second": 4.489,
"step": 1820
},
{
"epoch": 2.9937634188733258,
"grad_norm": 0.6603951738557682,
"learning_rate": 8.166571522916222e-11,
"loss": 0.6176,
"step": 1830
},
{
"epoch": 2.9986708925467744,
"step": 1833,
"total_flos": 531028544847872.0,
"train_loss": 0.7064719163714637,
"train_runtime": 53129.6318,
"train_samples_per_second": 2.209,
"train_steps_per_second": 0.035
}
],
"logging_steps": 10,
"max_steps": 1833,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 20,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 531028544847872.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
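
A minimal sketch of how this log could be consumed, for reference only: it loads the trainer_state.json shown above, splits log_history into training entries (which carry "loss") and evaluation entries (which carry "eval_loss"), and reports the step with the lowest evaluation loss, which should agree with the recorded best_metric. The local file path is an assumption, not part of the original file.

    # Sketch: parse trainer_state.json and summarize the loss history.
    # Assumption: the JSON above is saved locally as "trainer_state.json".
    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    # Training-step entries log "loss"; evaluation entries log "eval_loss".
    # The final summary entry (with "train_loss") matches neither filter.
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_loss" in e]

    # The minimum eval_loss should correspond to best_metric / best_model_checkpoint.
    best = min(eval_log, key=lambda e: e["eval_loss"])
    print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']} "
          f"(reported best_metric: {state['best_metric']:.4f})")

    # Print the evaluation-loss curve, one (step, eval_loss) pair per line.
    for e in eval_log:
        print(e["step"], round(e["eval_loss"], 4))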