{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02857142857142857,
      "grad_norm": 0.03305287514367692,
      "learning_rate": 7.142857142857143e-06,
      "loss": 0.1788,
      "step": 1
    },
    {
      "epoch": 0.05714285714285714,
      "grad_norm": 0.031936320817185844,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.1845,
      "step": 2
    },
    {
      "epoch": 0.08571428571428572,
      "grad_norm": 0.04074863585272115,
      "learning_rate": 2.1428571428571428e-05,
      "loss": 0.1837,
      "step": 3
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 0.035864053585471116,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.1851,
      "step": 4
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.03092198126963985,
      "learning_rate": 3.571428571428572e-05,
      "loss": 0.1784,
      "step": 5
    },
    {
      "epoch": 0.17142857142857143,
      "grad_norm": 0.038462538224975085,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.1771,
      "step": 6
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.03888233843876885,
      "learning_rate": 5e-05,
      "loss": 0.175,
      "step": 7
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 0.043857940264053076,
      "learning_rate": 4.996892303047306e-05,
      "loss": 0.1691,
      "step": 8
    },
    {
      "epoch": 0.2571428571428571,
      "grad_norm": 0.042294142827267324,
      "learning_rate": 4.987576938413504e-05,
      "loss": 0.1669,
      "step": 9
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.030998439097041534,
      "learning_rate": 4.972077065562821e-05,
      "loss": 0.1602,
      "step": 10
    },
    {
      "epoch": 0.3142857142857143,
      "grad_norm": 0.03425972383844138,
      "learning_rate": 4.9504312196213596e-05,
      "loss": 0.1511,
      "step": 11
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 0.03949318202067683,
      "learning_rate": 4.922693215572695e-05,
      "loss": 0.1398,
      "step": 12
    },
    {
      "epoch": 0.37142857142857144,
      "grad_norm": 0.039318951731654166,
      "learning_rate": 4.888932014465352e-05,
      "loss": 0.1395,
      "step": 13
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.036179419950033265,
      "learning_rate": 4.849231551964771e-05,
      "loss": 0.1341,
      "step": 14
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.03497982080054259,
      "learning_rate": 4.803690529676019e-05,
      "loss": 0.1319,
      "step": 15
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.03504652911611438,
      "learning_rate": 4.752422169756048e-05,
      "loss": 0.1184,
      "step": 16
    },
    {
      "epoch": 0.4857142857142857,
      "grad_norm": 0.03366289822931868,
      "learning_rate": 4.6955539334255716e-05,
      "loss": 0.1248,
      "step": 17
    },
    {
      "epoch": 0.5142857142857142,
      "grad_norm": 0.030373551018539273,
      "learning_rate": 4.6332272040803895e-05,
      "loss": 0.1089,
      "step": 18
    },
    {
      "epoch": 0.5428571428571428,
      "grad_norm": 0.030313054901733562,
      "learning_rate": 4.5655969357899874e-05,
      "loss": 0.1076,
      "step": 19
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.030895918951532413,
      "learning_rate": 4.4928312680573064e-05,
      "loss": 0.1092,
      "step": 20
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.029353647238654513,
      "learning_rate": 4.415111107797445e-05,
      "loss": 0.0981,
      "step": 21
    },
    {
      "epoch": 0.6285714285714286,
      "grad_norm": 0.02984852008137626,
      "learning_rate": 4.332629679574566e-05,
      "loss": 0.0957,
      "step": 22
    },
    {
      "epoch": 0.6571428571428571,
      "grad_norm": 0.029341330325013314,
      "learning_rate": 4.245592045215182e-05,
      "loss": 0.0854,
      "step": 23
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 0.028162425011642102,
      "learning_rate": 4.154214593992149e-05,
      "loss": 0.0811,
      "step": 24
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.027680225358496686,
      "learning_rate": 4.058724504646834e-05,
      "loss": 0.0814,
      "step": 25
    },
    {
      "epoch": 0.7428571428571429,
      "grad_norm": 0.02576777114993237,
      "learning_rate": 3.959359180586975e-05,
      "loss": 0.0762,
      "step": 26
    },
    {
      "epoch": 0.7714285714285715,
      "grad_norm": 0.02870619373322991,
      "learning_rate": 3.856365659664399e-05,
      "loss": 0.0687,
      "step": 27
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.024863648794319133,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.0661,
      "step": 28
    },
    {
      "epoch": 0.8285714285714286,
      "grad_norm": 0.026291877927815625,
      "learning_rate": 3.6405266433829075e-05,
      "loss": 0.0706,
      "step": 29
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.025586933701267463,
      "learning_rate": 3.5282177578265296e-05,
      "loss": 0.0645,
      "step": 30
    },
    {
      "epoch": 0.8857142857142857,
      "grad_norm": 0.025589408223466822,
      "learning_rate": 3.413352560915988e-05,
      "loss": 0.06,
      "step": 31
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 0.031037343916879803,
      "learning_rate": 3.2962166256292113e-05,
      "loss": 0.0599,
      "step": 32
    },
    {
      "epoch": 0.9428571428571428,
      "grad_norm": 0.023310202432820483,
      "learning_rate": 3.177101170357513e-05,
      "loss": 0.0569,
      "step": 33
    },
    {
      "epoch": 0.9714285714285714,
      "grad_norm": 0.024283943525703268,
      "learning_rate": 3.056302334890786e-05,
      "loss": 0.0602,
      "step": 34
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.027329875987439457,
      "learning_rate": 2.9341204441673266e-05,
      "loss": 0.0589,
      "step": 35
    },
    {
      "epoch": 1.0285714285714285,
      "grad_norm": 0.02501378282790971,
      "learning_rate": 2.8108592616187133e-05,
      "loss": 0.0543,
      "step": 36
    },
    {
      "epoch": 1.0571428571428572,
      "grad_norm": 0.027119458807348496,
      "learning_rate": 2.686825233966061e-05,
      "loss": 0.0517,
      "step": 37
    },
    {
      "epoch": 1.0857142857142856,
      "grad_norm": 0.02410634194975642,
      "learning_rate": 2.5623267293451826e-05,
      "loss": 0.0495,
      "step": 38
    },
    {
      "epoch": 1.1142857142857143,
      "grad_norm": 0.02638687718523104,
      "learning_rate": 2.4376732706548183e-05,
      "loss": 0.047,
      "step": 39
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.024038928955062953,
      "learning_rate": 2.3131747660339394e-05,
      "loss": 0.0453,
      "step": 40
    },
    {
      "epoch": 1.1714285714285715,
      "grad_norm": 0.02760307457120193,
      "learning_rate": 2.189140738381288e-05,
      "loss": 0.0459,
      "step": 41
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.02622815024441306,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 0.0439,
      "step": 42
    },
    {
      "epoch": 1.2285714285714286,
      "grad_norm": 0.035002615228578664,
      "learning_rate": 1.9436976651092144e-05,
      "loss": 0.0491,
      "step": 43
    },
    {
      "epoch": 1.2571428571428571,
      "grad_norm": 0.024513705968269937,
      "learning_rate": 1.8228988296424877e-05,
      "loss": 0.0384,
      "step": 44
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.030311524271095066,
      "learning_rate": 1.7037833743707892e-05,
      "loss": 0.04,
      "step": 45
    },
    {
      "epoch": 1.3142857142857143,
      "grad_norm": 0.024027406130505052,
      "learning_rate": 1.5866474390840125e-05,
      "loss": 0.0423,
      "step": 46
    },
    {
      "epoch": 1.342857142857143,
      "grad_norm": 0.021786340454075813,
      "learning_rate": 1.4717822421734718e-05,
      "loss": 0.0361,
      "step": 47
    },
    {
      "epoch": 1.3714285714285714,
      "grad_norm": 0.02644652055974124,
      "learning_rate": 1.3594733566170926e-05,
      "loss": 0.0412,
      "step": 48
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.020152919701181566,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.0394,
      "step": 49
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.019656157246220014,
      "learning_rate": 1.1436343403356017e-05,
      "loss": 0.0402,
      "step": 50
    },
    {
      "epoch": 1.457142857142857,
      "grad_norm": 0.030741595634745503,
      "learning_rate": 1.0406408194130259e-05,
      "loss": 0.0399,
      "step": 51
    },
    {
      "epoch": 1.4857142857142858,
      "grad_norm": 0.01956578251894237,
      "learning_rate": 9.412754953531663e-06,
      "loss": 0.0371,
      "step": 52
    },
    {
      "epoch": 1.5142857142857142,
      "grad_norm": 0.020137900808619075,
      "learning_rate": 8.45785406007852e-06,
      "loss": 0.0408,
      "step": 53
    },
    {
      "epoch": 1.5428571428571427,
      "grad_norm": 0.026267476897493938,
      "learning_rate": 7.5440795478481815e-06,
      "loss": 0.0361,
      "step": 54
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 0.019075496865514192,
      "learning_rate": 6.673703204254347e-06,
      "loss": 0.0369,
      "step": 55
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.022200908222749335,
      "learning_rate": 5.848888922025553e-06,
      "loss": 0.0341,
      "step": 56
    },
    {
      "epoch": 1.6285714285714286,
      "grad_norm": 0.024122181450616925,
      "learning_rate": 5.071687319426946e-06,
      "loss": 0.0361,
      "step": 57
    },
    {
      "epoch": 1.657142857142857,
      "grad_norm": 0.0186563328727714,
      "learning_rate": 4.344030642100133e-06,
      "loss": 0.0378,
      "step": 58
    },
    {
      "epoch": 1.6857142857142857,
      "grad_norm": 0.019147174664357086,
      "learning_rate": 3.66772795919611e-06,
      "loss": 0.0329,
      "step": 59
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.023587816390011593,
      "learning_rate": 3.044460665744284e-06,
      "loss": 0.035,
      "step": 60
    },
    {
      "epoch": 1.7428571428571429,
      "grad_norm": 0.020512713984449708,
      "learning_rate": 2.475778302439524e-06,
      "loss": 0.0336,
      "step": 61
    },
    {
      "epoch": 1.7714285714285714,
      "grad_norm": 0.026037372894968262,
      "learning_rate": 1.9630947032398067e-06,
      "loss": 0.0326,
      "step": 62
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.02608380575210609,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 0.0415,
      "step": 63
    },
    {
      "epoch": 1.8285714285714287,
      "grad_norm": 0.0186002350742929,
      "learning_rate": 1.1106798553464804e-06,
      "loss": 0.0353,
      "step": 64
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.022985332334595585,
      "learning_rate": 7.730678442730538e-07,
      "loss": 0.0393,
      "step": 65
    },
    {
      "epoch": 1.8857142857142857,
      "grad_norm": 0.019579707985211765,
      "learning_rate": 4.956878037864043e-07,
      "loss": 0.0322,
      "step": 66
    },
    {
      "epoch": 1.9142857142857141,
      "grad_norm": 0.01775819492990442,
      "learning_rate": 2.7922934437178695e-07,
      "loss": 0.0355,
      "step": 67
    },
    {
      "epoch": 1.9428571428571428,
      "grad_norm": 0.01882371685418005,
      "learning_rate": 1.2423061586496477e-07,
      "loss": 0.0391,
      "step": 68
    },
    {
      "epoch": 1.9714285714285715,
      "grad_norm": 0.01746735435052931,
      "learning_rate": 3.107696952694139e-08,
      "loss": 0.0353,
      "step": 69
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.018216407189787398,
      "learning_rate": 0.0,
      "loss": 0.0316,
      "step": 70
    },
    {
      "epoch": 2.0,
      "step": 70,
      "total_flos": 382633893167104.0,
      "train_loss": 0.07849941711340631,
      "train_runtime": 603.8266,
      "train_samples_per_second": 0.914,
      "train_steps_per_second": 0.116
    }
  ],
  "logging_steps": 1,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 382633893167104.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}