{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 57,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.054982817869415807,
"grad_norm": 2.3391265869140625,
"learning_rate": 5.000000000000001e-07,
"loss": 1.2932,
"step": 1
},
{
"epoch": 0.10996563573883161,
"grad_norm": 2.1669816970825195,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.2642,
"step": 2
},
{
"epoch": 0.16494845360824742,
"grad_norm": 2.2039060592651367,
"learning_rate": 1.5e-06,
"loss": 1.2409,
"step": 3
},
{
"epoch": 0.21993127147766323,
"grad_norm": 2.185878038406372,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.244,
"step": 4
},
{
"epoch": 0.27491408934707906,
"grad_norm": 2.1814076900482178,
"learning_rate": 2.5e-06,
"loss": 1.1757,
"step": 5
},
{
"epoch": 0.32989690721649484,
"grad_norm": 2.100562334060669,
"learning_rate": 3e-06,
"loss": 1.2375,
"step": 6
},
{
"epoch": 0.3848797250859107,
"grad_norm": 2.1685991287231445,
"learning_rate": 3.5000000000000004e-06,
"loss": 1.218,
"step": 7
},
{
"epoch": 0.43986254295532645,
"grad_norm": 2.6219348907470703,
"learning_rate": 4.000000000000001e-06,
"loss": 1.3291,
"step": 8
},
{
"epoch": 0.4948453608247423,
"grad_norm": 2.053830862045288,
"learning_rate": 4.5e-06,
"loss": 1.2892,
"step": 9
},
{
"epoch": 0.5498281786941581,
"grad_norm": 2.335982084274292,
"learning_rate": 5e-06,
"loss": 1.2636,
"step": 10
},
{
"epoch": 0.6048109965635738,
"grad_norm": 2.1228444576263428,
"learning_rate": 5.500000000000001e-06,
"loss": 1.2267,
"step": 11
},
{
"epoch": 0.6597938144329897,
"grad_norm": 2.169857978820801,
"learning_rate": 6e-06,
"loss": 1.2454,
"step": 12
},
{
"epoch": 0.7147766323024055,
"grad_norm": 2.084686279296875,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.1907,
"step": 13
},
{
"epoch": 0.7697594501718213,
"grad_norm": 2.284982442855835,
"learning_rate": 7.000000000000001e-06,
"loss": 1.2998,
"step": 14
},
{
"epoch": 0.8247422680412371,
"grad_norm": 1.9793288707733154,
"learning_rate": 7.5e-06,
"loss": 1.2019,
"step": 15
},
{
"epoch": 0.8797250859106529,
"grad_norm": 1.9528135061264038,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2003,
"step": 16
},
{
"epoch": 0.9347079037800687,
"grad_norm": 1.9243836402893066,
"learning_rate": 8.500000000000002e-06,
"loss": 1.1621,
"step": 17
},
{
"epoch": 0.9896907216494846,
"grad_norm": 1.9052047729492188,
"learning_rate": 9e-06,
"loss": 1.1504,
"step": 18
},
{
"epoch": 1.0,
"grad_norm": 2.759420394897461,
"learning_rate": 9.5e-06,
"loss": 1.2492,
"step": 19
},
{
"epoch": 1.0549828178694158,
"grad_norm": 1.8658391237258911,
"learning_rate": 1e-05,
"loss": 1.1941,
"step": 20
},
{
"epoch": 1.1099656357388317,
"grad_norm": 1.8759143352508545,
"learning_rate": 1.05e-05,
"loss": 1.1509,
"step": 21
},
{
"epoch": 1.1649484536082475,
"grad_norm": 1.7013018131256104,
"learning_rate": 1.1000000000000001e-05,
"loss": 1.0713,
"step": 22
},
{
"epoch": 1.2199312714776633,
"grad_norm": 1.4075945615768433,
"learning_rate": 1.1500000000000002e-05,
"loss": 1.0761,
"step": 23
},
{
"epoch": 1.274914089347079,
"grad_norm": 1.4849729537963867,
"learning_rate": 1.2e-05,
"loss": 1.0839,
"step": 24
},
{
"epoch": 1.3298969072164948,
"grad_norm": 1.3940335512161255,
"learning_rate": 1.25e-05,
"loss": 1.0349,
"step": 25
},
{
"epoch": 1.3848797250859106,
"grad_norm": 1.248413324356079,
"learning_rate": 1.3000000000000001e-05,
"loss": 1.1078,
"step": 26
},
{
"epoch": 1.4398625429553265,
"grad_norm": 1.2348881959915161,
"learning_rate": 1.3500000000000001e-05,
"loss": 1.0563,
"step": 27
},
{
"epoch": 1.4948453608247423,
"grad_norm": 1.1840934753417969,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.117,
"step": 28
},
{
"epoch": 1.5498281786941581,
"grad_norm": 0.9969284534454346,
"learning_rate": 1.45e-05,
"loss": 1.0499,
"step": 29
},
{
"epoch": 1.604810996563574,
"grad_norm": 1.0228712558746338,
"learning_rate": 1.5e-05,
"loss": 0.9143,
"step": 30
},
{
"epoch": 1.6597938144329896,
"grad_norm": 0.8392165303230286,
"learning_rate": 1.55e-05,
"loss": 1.0712,
"step": 31
},
{
"epoch": 1.7147766323024056,
"grad_norm": 0.6976754069328308,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.9455,
"step": 32
},
{
"epoch": 1.7697594501718212,
"grad_norm": 0.7253638505935669,
"learning_rate": 1.65e-05,
"loss": 0.8994,
"step": 33
},
{
"epoch": 1.824742268041237,
"grad_norm": 0.5587965846061707,
"learning_rate": 1.7000000000000003e-05,
"loss": 0.8769,
"step": 34
},
{
"epoch": 1.879725085910653,
"grad_norm": 0.5404813885688782,
"learning_rate": 1.75e-05,
"loss": 0.8509,
"step": 35
},
{
"epoch": 1.9347079037800687,
"grad_norm": 0.4780674874782562,
"learning_rate": 1.8e-05,
"loss": 0.9384,
"step": 36
},
{
"epoch": 1.9896907216494846,
"grad_norm": 0.4203198254108429,
"learning_rate": 1.85e-05,
"loss": 0.8815,
"step": 37
},
{
"epoch": 2.0,
"grad_norm": 0.5001429319381714,
"learning_rate": 1.9e-05,
"loss": 0.9702,
"step": 38
},
{
"epoch": 2.0549828178694156,
"grad_norm": 0.33664512634277344,
"learning_rate": 1.9500000000000003e-05,
"loss": 0.8686,
"step": 39
},
{
"epoch": 2.1099656357388317,
"grad_norm": 0.3299224376678467,
"learning_rate": 2e-05,
"loss": 0.9143,
"step": 40
},
{
"epoch": 2.1649484536082473,
"grad_norm": 0.3843877911567688,
"learning_rate": 2.05e-05,
"loss": 0.7757,
"step": 41
},
{
"epoch": 2.2199312714776633,
"grad_norm": 0.2754301428794861,
"learning_rate": 2.1e-05,
"loss": 0.8514,
"step": 42
},
{
"epoch": 2.274914089347079,
"grad_norm": 0.27374932169914246,
"learning_rate": 2.15e-05,
"loss": 0.7997,
"step": 43
},
{
"epoch": 2.329896907216495,
"grad_norm": 0.2719466984272003,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.8819,
"step": 44
},
{
"epoch": 2.3848797250859106,
"grad_norm": 0.2750904858112335,
"learning_rate": 2.25e-05,
"loss": 0.7952,
"step": 45
},
{
"epoch": 2.4398625429553267,
"grad_norm": 0.20324264466762543,
"learning_rate": 2.3000000000000003e-05,
"loss": 0.8438,
"step": 46
},
{
"epoch": 2.4948453608247423,
"grad_norm": 0.22448420524597168,
"learning_rate": 2.35e-05,
"loss": 0.8569,
"step": 47
},
{
"epoch": 2.549828178694158,
"grad_norm": 0.3943282961845398,
"learning_rate": 2.4e-05,
"loss": 0.7743,
"step": 48
},
{
"epoch": 2.604810996563574,
"grad_norm": 0.23266704380512238,
"learning_rate": 2.45e-05,
"loss": 0.8089,
"step": 49
},
{
"epoch": 2.6597938144329896,
"grad_norm": 0.1900027096271515,
"learning_rate": 2.5e-05,
"loss": 0.8926,
"step": 50
},
{
"epoch": 2.7147766323024056,
"grad_norm": 0.19019268453121185,
"learning_rate": 2.5500000000000003e-05,
"loss": 0.7765,
"step": 51
},
{
"epoch": 2.7697594501718212,
"grad_norm": 0.2077689915895462,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.763,
"step": 52
},
{
"epoch": 2.824742268041237,
"grad_norm": 0.1671900600194931,
"learning_rate": 2.6500000000000004e-05,
"loss": 0.7413,
"step": 53
},
{
"epoch": 2.879725085910653,
"grad_norm": 0.2202858328819275,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.7761,
"step": 54
},
{
"epoch": 2.934707903780069,
"grad_norm": 0.18933026492595673,
"learning_rate": 2.7500000000000004e-05,
"loss": 0.8357,
"step": 55
},
{
"epoch": 2.9896907216494846,
"grad_norm": 0.19358232617378235,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.8198,
"step": 56
},
{
"epoch": 3.0,
"grad_norm": 0.2464047223329544,
"learning_rate": 2.8499999999999998e-05,
"loss": 0.7364,
"step": 57
}
],
"logging_steps": 1.0,
"max_steps": 57,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.0642424594366464e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}