{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 50.0,
  "global_step": 4700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 8e-05,
      "loss": 3.1316,
      "step": 94
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.3430750370025635,
      "eval_runtime": 37.4569,
      "eval_samples_per_second": 5.339,
      "eval_steps_per_second": 0.667,
      "step": 94
    },
    {
      "epoch": 2.0,
      "learning_rate": 6e-05,
      "loss": 2.812,
      "step": 188
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.2115001678466797,
      "eval_runtime": 37.1409,
      "eval_samples_per_second": 5.385,
      "eval_steps_per_second": 0.673,
      "step": 188
    },
    {
      "epoch": 3.0,
      "learning_rate": 4e-05,
      "loss": 2.8118,
      "step": 282
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.984442949295044,
      "eval_runtime": 37.3422,
      "eval_samples_per_second": 5.356,
      "eval_steps_per_second": 0.669,
      "step": 282
    },
    {
      "epoch": 4.0,
      "learning_rate": 2e-05,
      "loss": 2.5555,
      "step": 376
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.9309371709823608,
      "eval_runtime": 37.2453,
      "eval_samples_per_second": 5.37,
      "eval_steps_per_second": 0.671,
      "step": 376
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.0,
      "loss": 2.4803,
      "step": 470
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.8790072202682495,
      "eval_runtime": 37.0224,
      "eval_samples_per_second": 5.402,
      "eval_steps_per_second": 0.675,
      "step": 470
    },
    {
      "epoch": 6.0,
      "learning_rate": 4e-05,
      "loss": 2.5099,
      "step": 564
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.029414415359497,
      "eval_runtime": 36.8084,
      "eval_samples_per_second": 5.434,
      "eval_steps_per_second": 0.679,
      "step": 564
    },
    {
      "epoch": 7.0,
      "learning_rate": 3e-05,
      "loss": 2.5365,
      "step": 658
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.8844919204711914,
      "eval_runtime": 36.3496,
      "eval_samples_per_second": 5.502,
      "eval_steps_per_second": 0.688,
      "step": 658
    },
    {
      "epoch": 8.0,
      "learning_rate": 2e-05,
      "loss": 2.4593,
      "step": 752
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.869893193244934,
      "eval_runtime": 36.7235,
      "eval_samples_per_second": 5.446,
      "eval_steps_per_second": 0.681,
      "step": 752
    },
    {
      "epoch": 9.0,
      "learning_rate": 1e-05,
      "loss": 2.4248,
      "step": 846
    },
    {
      "epoch": 9.0,
      "eval_loss": 1.794630765914917,
      "eval_runtime": 37.5145,
      "eval_samples_per_second": 5.331,
      "eval_steps_per_second": 0.666,
      "step": 846
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.0,
      "loss": 2.4017,
      "step": 940
    },
    {
      "epoch": 10.0,
      "eval_loss": 1.7905255556106567,
      "eval_runtime": 37.4985,
      "eval_samples_per_second": 5.334,
      "eval_steps_per_second": 0.667,
      "step": 940
    },
    {
      "epoch": 11.0,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 2.4523,
      "step": 1034
    },
    {
      "epoch": 11.0,
      "eval_loss": 1.8318734169006348,
      "eval_runtime": 36.8394,
      "eval_samples_per_second": 5.429,
      "eval_steps_per_second": 0.679,
      "step": 1034
    },
    {
      "epoch": 12.0,
      "learning_rate": 2e-05,
      "loss": 2.4407,
      "step": 1128
    },
    {
      "epoch": 12.0,
      "eval_loss": 1.8369686603546143,
      "eval_runtime": 37.09,
      "eval_samples_per_second": 5.392,
      "eval_steps_per_second": 0.674,
      "step": 1128
    },
    {
      "epoch": 13.0,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 2.3727,
      "step": 1222
    },
    {
      "epoch": 13.0,
      "eval_loss": 1.8001278638839722,
      "eval_runtime": 36.6653,
      "eval_samples_per_second": 5.455,
      "eval_steps_per_second": 0.682,
      "step": 1222
    },
    {
      "epoch": 14.0,
      "learning_rate": 6.666666666666667e-06,
      "loss": 2.317,
      "step": 1316
    },
    {
      "epoch": 14.0,
      "eval_loss": 1.7492361068725586,
      "eval_runtime": 37.2728,
      "eval_samples_per_second": 5.366,
      "eval_steps_per_second": 0.671,
      "step": 1316
    },
    {
      "epoch": 15.0,
      "learning_rate": 0.0,
      "loss": 2.3292,
      "step": 1410
    },
    {
      "epoch": 15.0,
      "eval_loss": 1.7530813217163086,
      "eval_runtime": 36.325,
      "eval_samples_per_second": 5.506,
      "eval_steps_per_second": 0.688,
      "step": 1410
    },
    {
      "epoch": 16.0,
      "learning_rate": 2e-05,
      "loss": 2.3086,
      "step": 1504
    },
    {
      "epoch": 16.0,
      "eval_loss": 1.7637449502944946,
      "eval_runtime": 36.2915,
      "eval_samples_per_second": 5.511,
      "eval_steps_per_second": 0.689,
      "step": 1504
    },
    {
      "epoch": 17.0,
      "learning_rate": 1.5e-05,
      "loss": 2.3175,
      "step": 1598
    },
    {
      "epoch": 17.0,
      "eval_loss": 1.7302308082580566,
      "eval_runtime": 36.52,
      "eval_samples_per_second": 5.476,
      "eval_steps_per_second": 0.685,
      "step": 1598
    },
    {
      "epoch": 18.0,
      "learning_rate": 1e-05,
      "loss": 2.3002,
      "step": 1692
    },
    {
      "epoch": 18.0,
      "eval_loss": 1.7216464281082153,
      "eval_runtime": 36.5936,
      "eval_samples_per_second": 5.465,
      "eval_steps_per_second": 0.683,
      "step": 1692
    },
    {
      "epoch": 19.0,
      "learning_rate": 5e-06,
      "loss": 2.2756,
      "step": 1786
    },
    {
      "epoch": 19.0,
      "eval_loss": 1.734541416168213,
      "eval_runtime": 36.178,
      "eval_samples_per_second": 5.528,
      "eval_steps_per_second": 0.691,
      "step": 1786
    },
    {
      "epoch": 20.0,
      "learning_rate": 0.0,
      "loss": 2.2656,
      "step": 1880
    },
    {
      "epoch": 20.0,
      "eval_loss": 1.7225457429885864,
      "eval_runtime": 36.7476,
      "eval_samples_per_second": 5.443,
      "eval_steps_per_second": 0.68,
      "step": 1880
    },
    {
      "epoch": 21.0,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 2.3083,
      "step": 1974
    },
    {
      "epoch": 21.0,
      "eval_loss": 1.7549386024475098,
      "eval_runtime": 37.0581,
      "eval_samples_per_second": 5.397,
      "eval_steps_per_second": 0.675,
      "step": 1974
    },
    {
      "epoch": 22.0,
      "learning_rate": 1.2e-05,
      "loss": 2.2542,
      "step": 2068
    },
    {
      "epoch": 22.0,
      "eval_loss": 1.7174808979034424,
      "eval_runtime": 36.8163,
      "eval_samples_per_second": 5.432,
      "eval_steps_per_second": 0.679,
      "step": 2068
    },
    {
      "epoch": 23.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.2262,
      "step": 2162
    },
    {
      "epoch": 23.0,
      "eval_loss": 1.699831485748291,
      "eval_runtime": 36.7706,
      "eval_samples_per_second": 5.439,
      "eval_steps_per_second": 0.68,
      "step": 2162
    },
    {
      "epoch": 24.0,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.2644,
      "step": 2256
    },
    {
      "epoch": 24.0,
      "eval_loss": 1.7019526958465576,
      "eval_runtime": 37.036,
      "eval_samples_per_second": 5.4,
      "eval_steps_per_second": 0.675,
      "step": 2256
    },
    {
      "epoch": 25.0,
      "learning_rate": 0.0,
      "loss": 2.2392,
      "step": 2350
    },
    {
      "epoch": 25.0,
      "eval_loss": 1.6933141946792603,
      "eval_runtime": 36.9874,
      "eval_samples_per_second": 5.407,
      "eval_steps_per_second": 0.676,
      "step": 2350
    },
    {
      "epoch": 26.0,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 2.228,
      "step": 2444
    },
    {
      "epoch": 26.0,
      "eval_loss": 1.7434390783309937,
      "eval_runtime": 36.8828,
      "eval_samples_per_second": 5.423,
      "eval_steps_per_second": 0.678,
      "step": 2444
    },
    {
      "epoch": 27.0,
      "learning_rate": 1e-05,
      "loss": 2.2284,
      "step": 2538
    },
    {
      "epoch": 27.0,
      "eval_loss": 1.7069613933563232,
      "eval_runtime": 36.8907,
      "eval_samples_per_second": 5.421,
      "eval_steps_per_second": 0.678,
      "step": 2538
    },
    {
      "epoch": 28.0,
      "learning_rate": 6.666666666666667e-06,
      "loss": 2.2019,
      "step": 2632
    },
    {
      "epoch": 28.0,
      "eval_loss": 1.6977431774139404,
      "eval_runtime": 36.9211,
      "eval_samples_per_second": 5.417,
      "eval_steps_per_second": 0.677,
      "step": 2632
    },
    {
      "epoch": 29.0,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 2.1804,
      "step": 2726
    },
    {
      "epoch": 29.0,
      "eval_loss": 1.6867393255233765,
      "eval_runtime": 36.4132,
      "eval_samples_per_second": 5.493,
      "eval_steps_per_second": 0.687,
      "step": 2726
    },
    {
      "epoch": 30.0,
      "learning_rate": 0.0,
      "loss": 2.1939,
      "step": 2820
    },
    {
      "epoch": 30.0,
      "eval_loss": 1.6858627796173096,
      "eval_runtime": 36.2466,
      "eval_samples_per_second": 5.518,
      "eval_steps_per_second": 0.69,
      "step": 2820
    },
    {
      "epoch": 31.0,
      "learning_rate": 1.1428571428571429e-05,
      "loss": 2.1863,
      "step": 2914
    },
    {
      "epoch": 31.0,
      "eval_loss": 1.6801691055297852,
      "eval_runtime": 37.7204,
      "eval_samples_per_second": 5.302,
      "eval_steps_per_second": 0.663,
      "step": 2914
    },
    {
      "epoch": 32.0,
      "learning_rate": 8.571428571428573e-06,
      "loss": 2.2009,
      "step": 3008
    },
    {
      "epoch": 32.0,
      "eval_loss": 1.6939789056777954,
      "eval_runtime": 37.6669,
      "eval_samples_per_second": 5.31,
      "eval_steps_per_second": 0.664,
      "step": 3008
    },
    {
      "epoch": 33.0,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 2.1894,
      "step": 3102
    },
    {
      "epoch": 33.0,
      "eval_loss": 1.6720386743545532,
      "eval_runtime": 38.2727,
      "eval_samples_per_second": 5.226,
      "eval_steps_per_second": 0.653,
      "step": 3102
    },
    {
      "epoch": 34.0,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 2.1759,
      "step": 3196
    },
    {
      "epoch": 34.0,
      "eval_loss": 1.6699862480163574,
      "eval_runtime": 37.7344,
      "eval_samples_per_second": 5.3,
      "eval_steps_per_second": 0.663,
      "step": 3196
    },
    {
      "epoch": 35.0,
      "learning_rate": 0.0,
      "loss": 2.1575,
      "step": 3290
    },
    {
      "epoch": 35.0,
      "eval_loss": 1.6713430881500244,
      "eval_runtime": 36.971,
      "eval_samples_per_second": 5.41,
      "eval_steps_per_second": 0.676,
      "step": 3290
    },
    {
      "epoch": 36.0,
      "learning_rate": 1e-05,
      "loss": 2.1715,
      "step": 3384
    },
    {
      "epoch": 36.0,
      "eval_loss": 1.7286556959152222,
      "eval_runtime": 37.366,
      "eval_samples_per_second": 5.352,
      "eval_steps_per_second": 0.669,
      "step": 3384
    },
    {
      "epoch": 37.0,
      "learning_rate": 7.5e-06,
      "loss": 2.2125,
      "step": 3478
    },
    {
      "epoch": 37.0,
      "eval_loss": 1.6994493007659912,
      "eval_runtime": 36.8702,
      "eval_samples_per_second": 5.424,
      "eval_steps_per_second": 0.678,
      "step": 3478
    },
    {
      "epoch": 38.0,
      "learning_rate": 5e-06,
      "loss": 2.2032,
      "step": 3572
    },
    {
      "epoch": 38.0,
      "eval_loss": 1.6896188259124756,
      "eval_runtime": 37.5355,
      "eval_samples_per_second": 5.328,
      "eval_steps_per_second": 0.666,
      "step": 3572
    },
    {
      "epoch": 39.0,
      "learning_rate": 2.5e-06,
      "loss": 2.21,
      "step": 3666
    },
    {
      "epoch": 39.0,
      "eval_loss": 1.6792980432510376,
      "eval_runtime": 37.2056,
      "eval_samples_per_second": 5.376,
      "eval_steps_per_second": 0.672,
      "step": 3666
    },
    {
      "epoch": 40.0,
      "learning_rate": 0.0,
      "loss": 2.1837,
      "step": 3760
    },
    {
      "epoch": 40.0,
      "eval_loss": 1.6746587753295898,
      "eval_runtime": 37.0982,
      "eval_samples_per_second": 5.391,
      "eval_steps_per_second": 0.674,
      "step": 3760
    },
    {
      "epoch": 41.0,
      "learning_rate": 8.88888888888889e-06,
      "loss": 2.2136,
      "step": 3854
    },
    {
      "epoch": 41.0,
      "eval_loss": 1.6727588176727295,
      "eval_runtime": 36.6175,
      "eval_samples_per_second": 5.462,
      "eval_steps_per_second": 0.683,
      "step": 3854
    },
    {
      "epoch": 42.0,
      "learning_rate": 6.666666666666667e-06,
      "loss": 2.1825,
      "step": 3948
    },
    {
      "epoch": 42.0,
      "eval_loss": 1.6641244888305664,
      "eval_runtime": 36.4688,
      "eval_samples_per_second": 5.484,
      "eval_steps_per_second": 0.686,
      "step": 3948
    },
    {
      "epoch": 43.0,
      "learning_rate": 4.444444444444445e-06,
      "loss": 2.1419,
      "step": 4042
    },
    {
      "epoch": 43.0,
      "eval_loss": 1.6828693151474,
      "eval_runtime": 36.8137,
      "eval_samples_per_second": 5.433,
      "eval_steps_per_second": 0.679,
      "step": 4042
    },
    {
      "epoch": 44.0,
      "learning_rate": 2.2222222222222225e-06,
      "loss": 2.1695,
      "step": 4136
    },
    {
      "epoch": 44.0,
      "eval_loss": 1.6625133752822876,
      "eval_runtime": 37.3227,
      "eval_samples_per_second": 5.359,
      "eval_steps_per_second": 0.67,
      "step": 4136
    },
    {
      "epoch": 45.0,
      "learning_rate": 0.0,
      "loss": 2.1478,
      "step": 4230
    },
    {
      "epoch": 45.0,
      "eval_loss": 1.667972445487976,
      "eval_runtime": 37.5388,
      "eval_samples_per_second": 5.328,
      "eval_steps_per_second": 0.666,
      "step": 4230
    },
    {
      "epoch": 46.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.1464,
      "step": 4324
    },
    {
      "epoch": 46.0,
      "eval_loss": 1.6795276403427124,
      "eval_runtime": 37.4735,
      "eval_samples_per_second": 5.337,
      "eval_steps_per_second": 0.667,
      "step": 4324
    },
    {
      "epoch": 47.0,
      "learning_rate": 6e-06,
      "loss": 2.1809,
      "step": 4418
    },
    {
      "epoch": 47.0,
      "eval_loss": 1.6775075197219849,
      "eval_runtime": 37.2204,
      "eval_samples_per_second": 5.373,
      "eval_steps_per_second": 0.672,
      "step": 4418
    },
    {
      "epoch": 48.0,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.174,
      "step": 4512
    },
    {
      "epoch": 48.0,
      "eval_loss": 1.666752576828003,
      "eval_runtime": 36.9029,
      "eval_samples_per_second": 5.42,
      "eval_steps_per_second": 0.677,
      "step": 4512
    },
    {
      "epoch": 49.0,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 2.1391,
      "step": 4606
    },
    {
      "epoch": 49.0,
      "eval_loss": 1.6559373140335083,
      "eval_runtime": 36.8614,
      "eval_samples_per_second": 5.426,
      "eval_steps_per_second": 0.678,
      "step": 4606
    },
    {
      "epoch": 50.0,
      "learning_rate": 0.0,
      "loss": 2.1466,
      "step": 4700
    },
    {
      "epoch": 50.0,
      "eval_loss": 1.6657767295837402,
      "eval_runtime": 37.3694,
      "eval_samples_per_second": 5.352,
      "eval_steps_per_second": 0.669,
      "step": 4700
    },
    {
      "epoch": 50.0,
      "step": 4700,
      "total_flos": 1.7920966176e+19,
      "train_loss": 0.21573773972531582,
      "train_runtime": 1237.9738,
      "train_samples_per_second": 30.291,
      "train_steps_per_second": 3.797
    }
  ],
  "max_steps": 4700,
  "num_train_epochs": 50,
  "total_flos": 1.7920966176e+19,
  "trial_name": null,
  "trial_params": null
}