{
"best_metric": 0.09976539827685843,
"best_model_checkpoint": "/bartabsa-reproduce/outputs/gpt22gpt2_42_std_percentile/checkpoint-16000",
"epoch": 2.999832822513235,
"eval_steps": 2000,
"global_step": 26916,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05572582892170521,
"grad_norm": 2.3220622539520264,
"learning_rate": 2.5e-05,
"loss": 3.4893,
"step": 500
},
{
"epoch": 0.11145165784341042,
"grad_norm": 2.527012586593628,
"learning_rate": 5e-05,
"loss": 2.9952,
"step": 1000
},
{
"epoch": 0.16717748676511562,
"grad_norm": 1.4452062845230103,
"learning_rate": 4.9035344960642076e-05,
"loss": 2.8253,
"step": 1500
},
{
"epoch": 0.22290331568682084,
"grad_norm": 1.5733131170272827,
"learning_rate": 4.807068992128415e-05,
"loss": 2.7029,
"step": 2000
},
{
"epoch": 0.22290331568682084,
"eval_loss": 2.542654514312744,
"eval_rouge1": 0.24369185533364146,
"eval_rouge2": 0.053150811665838404,
"eval_rougeL": 0.1468615883810676,
"eval_rougeLsum": 0.22845947792903248,
"eval_runtime": 6410.9465,
"eval_samples_per_second": 2.085,
"eval_steps_per_second": 0.13,
"step": 2000
},
{
"epoch": 0.27862914460852606,
"grad_norm": 1.733468770980835,
"learning_rate": 4.7106034881926225e-05,
"loss": 2.6421,
"step": 2500
},
{
"epoch": 0.33435497353023125,
"grad_norm": 1.6630736589431763,
"learning_rate": 4.61413798425683e-05,
"loss": 2.6013,
"step": 3000
},
{
"epoch": 0.3900808024519365,
"grad_norm": 3.9945011138916016,
"learning_rate": 4.517672480321037e-05,
"loss": 2.5585,
"step": 3500
},
{
"epoch": 0.4458066313736417,
"grad_norm": 1.595192313194275,
"learning_rate": 4.421206976385245e-05,
"loss": 2.5218,
"step": 4000
},
{
"epoch": 0.4458066313736417,
"eval_loss": 2.3777079582214355,
"eval_rouge1": 0.26361751739528855,
"eval_rouge2": 0.06605883769296508,
"eval_rougeL": 0.15780235954372396,
"eval_rougeLsum": 0.24547300255336824,
"eval_runtime": 6409.59,
"eval_samples_per_second": 2.086,
"eval_steps_per_second": 0.13,
"step": 4000
},
{
"epoch": 0.5015324602953469,
"grad_norm": 1.5933812856674194,
"learning_rate": 4.324741472449452e-05,
"loss": 2.4992,
"step": 4500
},
{
"epoch": 0.5572582892170521,
"grad_norm": 1.5219066143035889,
"learning_rate": 4.2282759685136595e-05,
"loss": 2.4691,
"step": 5000
},
{
"epoch": 0.6129841181387573,
"grad_norm": 1.4694565534591675,
"learning_rate": 4.131810464577867e-05,
"loss": 2.4356,
"step": 5500
},
{
"epoch": 0.6687099470604625,
"grad_norm": 1.9962424039840698,
"learning_rate": 4.035344960642074e-05,
"loss": 2.4141,
"step": 6000
},
{
"epoch": 0.6687099470604625,
"eval_loss": 2.2899107933044434,
"eval_rouge1": 0.2682906048506787,
"eval_rouge2": 0.06950238260179739,
"eval_rougeL": 0.1587441828998173,
"eval_rougeLsum": 0.25060189239845465,
"eval_runtime": 6396.129,
"eval_samples_per_second": 2.09,
"eval_steps_per_second": 0.131,
"step": 6000
},
{
"epoch": 0.7244357759821677,
"grad_norm": 6.416402816772461,
"learning_rate": 3.938879456706282e-05,
"loss": 2.3945,
"step": 6500
},
{
"epoch": 0.780161604903873,
"grad_norm": 1.5234980583190918,
"learning_rate": 3.84241395277049e-05,
"loss": 2.3738,
"step": 7000
},
{
"epoch": 0.8358874338255782,
"grad_norm": 4.883070468902588,
"learning_rate": 3.745948448834697e-05,
"loss": 2.3584,
"step": 7500
},
{
"epoch": 0.8916132627472834,
"grad_norm": 1.8043371438980103,
"learning_rate": 3.6494829448989046e-05,
"loss": 2.3452,
"step": 8000
},
{
"epoch": 0.8916132627472834,
"eval_loss": 2.219862937927246,
"eval_rouge1": 0.2887851100955251,
"eval_rouge2": 0.08135324671358818,
"eval_rougeL": 0.17143312551180023,
"eval_rougeLsum": 0.2693751508675857,
"eval_runtime": 6386.9905,
"eval_samples_per_second": 2.093,
"eval_steps_per_second": 0.131,
"step": 8000
},
{
"epoch": 0.9473390916689886,
"grad_norm": 2.5791819095611572,
"learning_rate": 3.553017440963112e-05,
"loss": 2.325,
"step": 8500
},
{
"epoch": 1.0030649205906939,
"grad_norm": 1.7074276208877563,
"learning_rate": 3.4565519370273194e-05,
"loss": 2.2897,
"step": 9000
},
{
"epoch": 1.058790749512399,
"grad_norm": 1.7298952341079712,
"learning_rate": 3.360086433091527e-05,
"loss": 2.0062,
"step": 9500
},
{
"epoch": 1.1145165784341042,
"grad_norm": 1.575429916381836,
"learning_rate": 3.263620929155734e-05,
"loss": 2.0037,
"step": 10000
},
{
"epoch": 1.1145165784341042,
"eval_loss": 2.1816983222961426,
"eval_rouge1": 0.29842511392124504,
"eval_rouge2": 0.08508044071327496,
"eval_rougeL": 0.17288333694802113,
"eval_rougeLsum": 0.2771806390068811,
"eval_runtime": 6383.653,
"eval_samples_per_second": 2.094,
"eval_steps_per_second": 0.131,
"step": 10000
},
{
"epoch": 1.1702424073558095,
"grad_norm": 1.666231632232666,
"learning_rate": 3.1671554252199416e-05,
"loss": 2.0088,
"step": 10500
},
{
"epoch": 1.2259682362775146,
"grad_norm": 1.6241954565048218,
"learning_rate": 3.070689921284149e-05,
"loss": 2.0029,
"step": 11000
},
{
"epoch": 1.28169406519922,
"grad_norm": 1.9751548767089844,
"learning_rate": 2.9742244173483564e-05,
"loss": 1.9862,
"step": 11500
},
{
"epoch": 1.337419894120925,
"grad_norm": 13.691629409790039,
"learning_rate": 2.8777589134125638e-05,
"loss": 1.9905,
"step": 12000
},
{
"epoch": 1.337419894120925,
"eval_loss": 2.1423134803771973,
"eval_rouge1": 0.29073567485913165,
"eval_rouge2": 0.08227824922946644,
"eval_rougeL": 0.17132293154574824,
"eval_rougeLsum": 0.2718749763727648,
"eval_runtime": 6368.6616,
"eval_samples_per_second": 2.099,
"eval_steps_per_second": 0.131,
"step": 12000
},
{
"epoch": 1.3931457230426303,
"grad_norm": 1.6704634428024292,
"learning_rate": 2.7812934094767712e-05,
"loss": 1.9886,
"step": 12500
},
{
"epoch": 1.4488715519643356,
"grad_norm": 2.2663302421569824,
"learning_rate": 2.6848279055409786e-05,
"loss": 1.9843,
"step": 13000
},
{
"epoch": 1.5045973808860407,
"grad_norm": 1.9374725818634033,
"learning_rate": 2.588362401605186e-05,
"loss": 1.9773,
"step": 13500
},
{
"epoch": 1.5603232098077457,
"grad_norm": 2.878082513809204,
"learning_rate": 2.4918968976693934e-05,
"loss": 1.9711,
"step": 14000
},
{
"epoch": 1.5603232098077457,
"eval_loss": 2.0982916355133057,
"eval_rouge1": 0.3054580501233143,
"eval_rouge2": 0.09153227846352645,
"eval_rougeL": 0.1785600815773608,
"eval_rougeLsum": 0.2849821221895824,
"eval_runtime": 6387.6311,
"eval_samples_per_second": 2.093,
"eval_steps_per_second": 0.131,
"step": 14000
},
{
"epoch": 1.616049038729451,
"grad_norm": 3.682783365249634,
"learning_rate": 2.3954313937336008e-05,
"loss": 1.9642,
"step": 14500
},
{
"epoch": 1.6717748676511563,
"grad_norm": 1.605305790901184,
"learning_rate": 2.2989658897978082e-05,
"loss": 1.9557,
"step": 15000
},
{
"epoch": 1.7275006965728616,
"grad_norm": 2.3347930908203125,
"learning_rate": 2.2025003858620156e-05,
"loss": 1.9482,
"step": 15500
},
{
"epoch": 1.7832265254945667,
"grad_norm": 1.8029910326004028,
"learning_rate": 2.1060348819262234e-05,
"loss": 1.9476,
"step": 16000
},
{
"epoch": 1.7832265254945667,
"eval_loss": 2.0546956062316895,
"eval_rouge1": 0.31858576030641184,
"eval_rouge2": 0.09976539827685843,
"eval_rougeL": 0.1858053683814233,
"eval_rougeLsum": 0.2971687535876159,
"eval_runtime": 6388.7784,
"eval_samples_per_second": 2.092,
"eval_steps_per_second": 0.131,
"step": 16000
},
{
"epoch": 1.8389523544162718,
"grad_norm": 3.6298651695251465,
"learning_rate": 2.0095693779904308e-05,
"loss": 1.9399,
"step": 16500
},
{
"epoch": 1.894678183337977,
"grad_norm": 1.8237981796264648,
"learning_rate": 1.9131038740546382e-05,
"loss": 1.9255,
"step": 17000
},
{
"epoch": 1.9504040122596824,
"grad_norm": 1.8469113111495972,
"learning_rate": 1.8166383701188456e-05,
"loss": 1.9222,
"step": 17500
},
{
"epoch": 2.0061298411813877,
"grad_norm": 2.2777647972106934,
"learning_rate": 1.720172866183053e-05,
"loss": 1.8964,
"step": 18000
},
{
"epoch": 2.0061298411813877,
"eval_loss": 2.0509514808654785,
"eval_rouge1": 0.3031551742963028,
"eval_rouge2": 0.09186053471142167,
"eval_rougeL": 0.17780386601249287,
"eval_rougeLsum": 0.28264405636299283,
"eval_runtime": 6380.9224,
"eval_samples_per_second": 2.095,
"eval_steps_per_second": 0.131,
"step": 18000
},
{
"epoch": 2.0618556701030926,
"grad_norm": 21.444568634033203,
"learning_rate": 1.6237073622472604e-05,
"loss": 1.6354,
"step": 18500
},
{
"epoch": 2.117581499024798,
"grad_norm": 2.026742935180664,
"learning_rate": 1.5272418583114678e-05,
"loss": 1.6349,
"step": 19000
},
{
"epoch": 2.173307327946503,
"grad_norm": 2.846383571624756,
"learning_rate": 1.4307763543756752e-05,
"loss": 1.6404,
"step": 19500
},
{
"epoch": 2.2290331568682085,
"grad_norm": 2.181227684020996,
"learning_rate": 1.3343108504398828e-05,
"loss": 1.6389,
"step": 20000
},
{
"epoch": 2.2290331568682085,
"eval_loss": 2.048452615737915,
"eval_rouge1": 0.31852665424090676,
"eval_rouge2": 0.0997116570016274,
"eval_rougeL": 0.1856553825562861,
"eval_rougeLsum": 0.29617151453401813,
"eval_runtime": 6398.3205,
"eval_samples_per_second": 2.089,
"eval_steps_per_second": 0.131,
"step": 20000
},
{
"epoch": 2.2847589857899138,
"grad_norm": 2.3432796001434326,
"learning_rate": 1.2378453465040902e-05,
"loss": 1.63,
"step": 20500
},
{
"epoch": 2.340484814711619,
"grad_norm": 3.7577052116394043,
"learning_rate": 1.1413798425682977e-05,
"loss": 1.6345,
"step": 21000
},
{
"epoch": 2.396210643633324,
"grad_norm": 2.6469638347625732,
"learning_rate": 1.0449143386325052e-05,
"loss": 1.6257,
"step": 21500
},
{
"epoch": 2.4519364725550292,
"grad_norm": 1.9295642375946045,
"learning_rate": 9.484488346967126e-06,
"loss": 1.6251,
"step": 22000
},
{
"epoch": 2.4519364725550292,
"eval_loss": 2.0327651500701904,
"eval_rouge1": 0.2991586762996594,
"eval_rouge2": 0.0889418449780405,
"eval_rougeL": 0.1742592763849186,
"eval_rougeLsum": 0.2782439206965389,
"eval_runtime": 6392.6136,
"eval_samples_per_second": 2.091,
"eval_steps_per_second": 0.131,
"step": 22000
},
{
"epoch": 2.5076623014767345,
"grad_norm": 3.863924264907837,
"learning_rate": 8.5198333076092e-06,
"loss": 1.6185,
"step": 22500
},
{
"epoch": 2.56338813039844,
"grad_norm": 13.475985527038574,
"learning_rate": 7.5551782682512745e-06,
"loss": 1.6168,
"step": 23000
},
{
"epoch": 2.6191139593201447,
"grad_norm": 20.322145462036133,
"learning_rate": 6.5905232288933485e-06,
"loss": 1.6173,
"step": 23500
},
{
"epoch": 2.67483978824185,
"grad_norm": 2.083725929260254,
"learning_rate": 5.6258681895354226e-06,
"loss": 1.6119,
"step": 24000
},
{
"epoch": 2.67483978824185,
"eval_loss": 2.013817310333252,
"eval_rouge1": 0.30453205743071354,
"eval_rouge2": 0.0939110545351104,
"eval_rougeL": 0.17781639483308936,
"eval_rougeLsum": 0.28319401801413485,
"eval_runtime": 6384.3929,
"eval_samples_per_second": 2.094,
"eval_steps_per_second": 0.131,
"step": 24000
},
{
"epoch": 2.7305656171635553,
"grad_norm": 2.3929226398468018,
"learning_rate": 4.661213150177497e-06,
"loss": 1.6204,
"step": 24500
},
{
"epoch": 2.7862914460852606,
"grad_norm": 2.083616256713867,
"learning_rate": 3.6965581108195706e-06,
"loss": 1.6091,
"step": 25000
},
{
"epoch": 2.842017275006966,
"grad_norm": 1.816172480583191,
"learning_rate": 2.7319030714616455e-06,
"loss": 1.6104,
"step": 25500
},
{
"epoch": 2.897743103928671,
"grad_norm": 7.610255241394043,
"learning_rate": 1.7672480321037198e-06,
"loss": 1.6093,
"step": 26000
},
{
"epoch": 2.897743103928671,
"eval_loss": 2.0071072578430176,
"eval_rouge1": 0.30614503423795814,
"eval_rouge2": 0.09393803869603295,
"eval_rougeL": 0.17798792217037074,
"eval_rougeLsum": 0.28484623142325816,
"eval_runtime": 6392.1852,
"eval_samples_per_second": 2.091,
"eval_steps_per_second": 0.131,
"step": 26000
},
{
"epoch": 2.953468932850376,
"grad_norm": 5.285951137542725,
"learning_rate": 8.025929927457941e-07,
"loss": 1.6078,
"step": 26500
},
{
"epoch": 2.999832822513235,
"step": 26916,
"total_flos": 4.3736503104621773e+18,
"train_loss": 2.05411821950118,
"train_runtime": 108823.6651,
"train_samples_per_second": 7.915,
"train_steps_per_second": 0.247
}
],
"logging_steps": 500,
"max_steps": 26916,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.3736503104621773e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}