{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.6102713360247864, "eval_steps": 5000, "global_step": 6500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.009388789784996713, "grad_norm": 87.77811431884766, "learning_rate": 9.009854528390429e-07, "loss": 16.2337, "step": 100 }, { "epoch": 0.018777579569993427, "grad_norm": 85.86430358886719, "learning_rate": 1.8395119662130456e-06, "loss": 13.5901, "step": 200 }, { "epoch": 0.02816636935499014, "grad_norm": 14.885255813598633, "learning_rate": 2.7592679493195683e-06, "loss": 9.8565, "step": 300 }, { "epoch": 0.03755515913998685, "grad_norm": 6.9691972732543945, "learning_rate": 3.6977944626935713e-06, "loss": 8.3332, "step": 400 }, { "epoch": 0.04694394892498357, "grad_norm": 5.612818241119385, "learning_rate": 4.6363209760675744e-06, "loss": 8.1261, "step": 500 }, { "epoch": 0.05633273870998028, "grad_norm": 4.705409526824951, "learning_rate": 5.574847489441577e-06, "loss": 8.0697, "step": 600 }, { "epoch": 0.06572152849497699, "grad_norm": 4.337332725524902, "learning_rate": 6.51337400281558e-06, "loss": 8.0298, "step": 700 }, { "epoch": 0.0751103182799737, "grad_norm": 3.6314213275909424, "learning_rate": 7.451900516189583e-06, "loss": 8.033, "step": 800 }, { "epoch": 0.08449910806497042, "grad_norm": 3.4845075607299805, "learning_rate": 8.390427029563585e-06, "loss": 7.9858, "step": 900 }, { "epoch": 0.09388789784996714, "grad_norm": 5.188210487365723, "learning_rate": 9.328953542937589e-06, "loss": 8.012, "step": 1000 }, { "epoch": 0.10327668763496385, "grad_norm": 3.0830442905426025, "learning_rate": 1.0267480056311592e-05, "loss": 7.9745, "step": 1100 }, { "epoch": 0.11266547741996057, "grad_norm": 3.4729278087615967, "learning_rate": 1.1206006569685594e-05, "loss": 8.0091, "step": 1200 }, { "epoch": 0.12205426720495728, "grad_norm": 2.329235076904297, "learning_rate": 1.2144533083059597e-05, "loss": 8.0221, "step": 1300 }, { "epoch": 0.13144305698995398, "grad_norm": 2.7225279808044434, "learning_rate": 1.3083059596433601e-05, "loss": 7.9583, "step": 1400 }, { "epoch": 0.1408318467749507, "grad_norm": 2.012805938720703, "learning_rate": 1.4021586109807603e-05, "loss": 8.0031, "step": 1500 }, { "epoch": 0.1502206365599474, "grad_norm": 2.9397523403167725, "learning_rate": 1.4960112623181606e-05, "loss": 7.9985, "step": 1600 }, { "epoch": 0.15960942634494413, "grad_norm": 2.356337308883667, "learning_rate": 1.589863913655561e-05, "loss": 7.9647, "step": 1700 }, { "epoch": 0.16899821612994084, "grad_norm": 2.6846818923950195, "learning_rate": 1.6837165649929613e-05, "loss": 7.9857, "step": 1800 }, { "epoch": 0.17838700591493756, "grad_norm": 2.0188565254211426, "learning_rate": 1.7775692163303613e-05, "loss": 7.9806, "step": 1900 }, { "epoch": 0.18777579569993427, "grad_norm": 4.030488014221191, "learning_rate": 1.8714218676677617e-05, "loss": 7.9761, "step": 2000 }, { "epoch": 0.197164585484931, "grad_norm": 4.183101654052734, "learning_rate": 1.965274519005162e-05, "loss": 7.9696, "step": 2100 }, { "epoch": 0.2065533752699277, "grad_norm": 1.4769889116287231, "learning_rate": 1.9934275728965626e-05, "loss": 8.0014, "step": 2200 }, { "epoch": 0.21594216505492442, "grad_norm": 2.1914358139038086, "learning_rate": 1.9829951489228525e-05, "loss": 7.9546, "step": 2300 }, { "epoch": 0.22533095483992113, "grad_norm": 22.55516815185547, "learning_rate": 1.972562724949142e-05, "loss": 7.9874, "step": 2400 }, { "epoch": 0.23471974462491785, 
"grad_norm": 1.635116457939148, "learning_rate": 1.962130300975432e-05, "loss": 7.9846, "step": 2500 }, { "epoch": 0.24410853440991456, "grad_norm": 5.707275390625, "learning_rate": 1.9516978770017215e-05, "loss": 7.9664, "step": 2600 }, { "epoch": 0.2534973241949113, "grad_norm": 4.194604396820068, "learning_rate": 1.9412654530280113e-05, "loss": 7.9725, "step": 2700 }, { "epoch": 0.26288611397990796, "grad_norm": 2.0074055194854736, "learning_rate": 1.930833029054301e-05, "loss": 7.9419, "step": 2800 }, { "epoch": 0.2722749037649047, "grad_norm": 26.4300479888916, "learning_rate": 1.9204006050805904e-05, "loss": 7.9786, "step": 2900 }, { "epoch": 0.2816636935499014, "grad_norm": 2.5870931148529053, "learning_rate": 1.9099681811068803e-05, "loss": 7.9479, "step": 3000 }, { "epoch": 0.29105248333489814, "grad_norm": 1.6209933757781982, "learning_rate": 1.8995357571331702e-05, "loss": 7.9526, "step": 3100 }, { "epoch": 0.3004412731198948, "grad_norm": 19.398080825805664, "learning_rate": 1.8891033331594598e-05, "loss": 7.9613, "step": 3200 }, { "epoch": 0.30983006290489157, "grad_norm": 2.124729871749878, "learning_rate": 1.8786709091857496e-05, "loss": 7.9994, "step": 3300 }, { "epoch": 0.31921885268988826, "grad_norm": 2.701019763946533, "learning_rate": 1.8682384852120392e-05, "loss": 7.9464, "step": 3400 }, { "epoch": 0.328607642474885, "grad_norm": 1.944600224494934, "learning_rate": 1.8578060612383287e-05, "loss": 7.9429, "step": 3500 }, { "epoch": 0.3379964322598817, "grad_norm": 41.85493087768555, "learning_rate": 1.8473736372646186e-05, "loss": 7.9539, "step": 3600 }, { "epoch": 0.34738522204487843, "grad_norm": 2.024019479751587, "learning_rate": 1.8369412132909085e-05, "loss": 7.9699, "step": 3700 }, { "epoch": 0.3567740118298751, "grad_norm": 2.2833781242370605, "learning_rate": 1.826508789317198e-05, "loss": 7.9144, "step": 3800 }, { "epoch": 0.36616280161487186, "grad_norm": 3.1666574478149414, "learning_rate": 1.8160763653434876e-05, "loss": 7.9424, "step": 3900 }, { "epoch": 0.37555159139986855, "grad_norm": 1.7259443998336792, "learning_rate": 1.8056439413697775e-05, "loss": 7.9361, "step": 4000 }, { "epoch": 0.3849403811848653, "grad_norm": 2.5588905811309814, "learning_rate": 1.795211517396067e-05, "loss": 7.9144, "step": 4100 }, { "epoch": 0.394329170969862, "grad_norm": 2.542963743209839, "learning_rate": 1.784779093422357e-05, "loss": 7.907, "step": 4200 }, { "epoch": 0.4037179607548587, "grad_norm": 2.755725622177124, "learning_rate": 1.7743466694486468e-05, "loss": 7.9049, "step": 4300 }, { "epoch": 0.4131067505398554, "grad_norm": 2.6067683696746826, "learning_rate": 1.7639142454749364e-05, "loss": 7.939, "step": 4400 }, { "epoch": 0.42249554032485215, "grad_norm": 2.614475965499878, "learning_rate": 1.753481821501226e-05, "loss": 7.9067, "step": 4500 }, { "epoch": 0.43188433010984884, "grad_norm": 2.172943353652954, "learning_rate": 1.7430493975275155e-05, "loss": 7.9149, "step": 4600 }, { "epoch": 0.4412731198948456, "grad_norm": 6.292716979980469, "learning_rate": 1.7326169735538053e-05, "loss": 7.9705, "step": 4700 }, { "epoch": 0.45066190967984227, "grad_norm": 2.595337152481079, "learning_rate": 1.7221845495800952e-05, "loss": 7.8992, "step": 4800 }, { "epoch": 0.460050699464839, "grad_norm": 5.529814720153809, "learning_rate": 1.7117521256063848e-05, "loss": 7.9077, "step": 4900 }, { "epoch": 0.4694394892498357, "grad_norm": 6.696155548095703, "learning_rate": 1.7013197016326747e-05, "loss": 7.8992, "step": 5000 }, { "epoch": 0.4694394892498357, "eval_loss": 
7.916718006134033, "eval_runtime": 334.3618, "eval_samples_per_second": 1019.336, "eval_steps_per_second": 7.964, "step": 5000 }, { "epoch": 0.4788282790348324, "grad_norm": 26.500988006591797, "learning_rate": 1.6908872776589642e-05, "loss": 7.914, "step": 5100 }, { "epoch": 0.4882170688198291, "grad_norm": 2.454939126968384, "learning_rate": 1.6804548536852537e-05, "loss": 7.8913, "step": 5200 }, { "epoch": 0.4976058586048258, "grad_norm": 1.8935959339141846, "learning_rate": 1.6700224297115436e-05, "loss": 7.8999, "step": 5300 }, { "epoch": 0.5069946483898226, "grad_norm": 2.2066667079925537, "learning_rate": 1.6595900057378335e-05, "loss": 7.8818, "step": 5400 }, { "epoch": 0.5163834381748192, "grad_norm": 2.7672908306121826, "learning_rate": 1.649157581764123e-05, "loss": 7.9383, "step": 5500 }, { "epoch": 0.5257722279598159, "grad_norm": 4.411477088928223, "learning_rate": 1.6387251577904126e-05, "loss": 7.9094, "step": 5600 }, { "epoch": 0.5351610177448127, "grad_norm": 50.536991119384766, "learning_rate": 1.6282927338167025e-05, "loss": 7.8986, "step": 5700 }, { "epoch": 0.5445498075298094, "grad_norm": 2.9657583236694336, "learning_rate": 1.617860309842992e-05, "loss": 7.9015, "step": 5800 }, { "epoch": 0.5539385973148061, "grad_norm": 1.910575270652771, "learning_rate": 1.607427885869282e-05, "loss": 7.9059, "step": 5900 }, { "epoch": 0.5633273870998028, "grad_norm": 3.9665298461914062, "learning_rate": 1.5969954618955715e-05, "loss": 7.8524, "step": 6000 }, { "epoch": 0.5727161768847996, "grad_norm": 2.0286688804626465, "learning_rate": 1.5865630379218614e-05, "loss": 7.8788, "step": 6100 }, { "epoch": 0.5821049666697963, "grad_norm": 3.200676202774048, "learning_rate": 1.576130613948151e-05, "loss": 7.8712, "step": 6200 }, { "epoch": 0.591493756454793, "grad_norm": 2.0278351306915283, "learning_rate": 1.5656981899744408e-05, "loss": 7.8967, "step": 6300 }, { "epoch": 0.6008825462397896, "grad_norm": 3.4500420093536377, "learning_rate": 1.5553700902404677e-05, "loss": 7.8677, "step": 6400 }, { "epoch": 0.6102713360247864, "grad_norm": 2.8257575035095215, "learning_rate": 1.5450419905064945e-05, "loss": 7.9132, "step": 6500 } ], "logging_steps": 100, "max_steps": 21302, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 0.0, "train_batch_size": 128, "trial_name": null, "trial_params": null }