{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 57,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.054982817869415807,
"grad_norm": 2.2747802734375,
"learning_rate": 5.000000000000001e-07,
"loss": 1.2932,
"step": 1
},
{
"epoch": 0.10996563573883161,
"grad_norm": 2.124077796936035,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.2642,
"step": 2
},
{
"epoch": 0.16494845360824742,
"grad_norm": 2.1374475955963135,
"learning_rate": 1.5e-06,
"loss": 1.2401,
"step": 3
},
{
"epoch": 0.21993127147766323,
"grad_norm": 2.113400459289551,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.2434,
"step": 4
},
{
"epoch": 0.27491408934707906,
"grad_norm": 2.054466962814331,
"learning_rate": 2.5e-06,
"loss": 1.175,
"step": 5
},
{
"epoch": 0.32989690721649484,
"grad_norm": 2.11995267868042,
"learning_rate": 3e-06,
"loss": 1.2371,
"step": 6
},
{
"epoch": 0.3848797250859107,
"grad_norm": 2.1266541481018066,
"learning_rate": 3.5000000000000004e-06,
"loss": 1.2172,
"step": 7
},
{
"epoch": 0.43986254295532645,
"grad_norm": 2.604534149169922,
"learning_rate": 4.000000000000001e-06,
"loss": 1.3291,
"step": 8
},
{
"epoch": 0.4948453608247423,
"grad_norm": 1.9917025566101074,
"learning_rate": 4.5e-06,
"loss": 1.2904,
"step": 9
},
{
"epoch": 0.5498281786941581,
"grad_norm": 2.2239606380462646,
"learning_rate": 5e-06,
"loss": 1.265,
"step": 10
},
{
"epoch": 0.6048109965635738,
"grad_norm": 2.1611850261688232,
"learning_rate": 5.500000000000001e-06,
"loss": 1.2288,
"step": 11
},
{
"epoch": 0.6597938144329897,
"grad_norm": 2.019578218460083,
"learning_rate": 6e-06,
"loss": 1.2479,
"step": 12
},
{
"epoch": 0.7147766323024055,
"grad_norm": 2.0458662509918213,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.1926,
"step": 13
},
{
"epoch": 0.7697594501718213,
"grad_norm": 2.325169563293457,
"learning_rate": 7.000000000000001e-06,
"loss": 1.3024,
"step": 14
},
{
"epoch": 0.8247422680412371,
"grad_norm": 1.9853283166885376,
"learning_rate": 7.5e-06,
"loss": 1.2042,
"step": 15
},
{
"epoch": 0.8797250859106529,
"grad_norm": 1.977973461151123,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2033,
"step": 16
},
{
"epoch": 0.9347079037800687,
"grad_norm": 1.934755802154541,
"learning_rate": 8.500000000000002e-06,
"loss": 1.1652,
"step": 17
},
{
"epoch": 0.9896907216494846,
"grad_norm": 1.8828901052474976,
"learning_rate": 9e-06,
"loss": 1.1532,
"step": 18
},
{
"epoch": 1.0,
"grad_norm": 2.7150800228118896,
"learning_rate": 9.5e-06,
"loss": 1.2575,
"step": 19
},
{
"epoch": 1.0549828178694158,
"grad_norm": 1.868530035018921,
"learning_rate": 1e-05,
"loss": 1.1978,
"step": 20
},
{
"epoch": 1.1099656357388317,
"grad_norm": 1.8736613988876343,
"learning_rate": 1.05e-05,
"loss": 1.1551,
"step": 21
},
{
"epoch": 1.1649484536082475,
"grad_norm": 1.703709363937378,
"learning_rate": 1.1000000000000001e-05,
"loss": 1.0762,
"step": 22
},
{
"epoch": 1.2199312714776633,
"grad_norm": 1.3763967752456665,
"learning_rate": 1.1500000000000002e-05,
"loss": 1.0799,
"step": 23
},
{
"epoch": 1.274914089347079,
"grad_norm": 1.4954501390457153,
"learning_rate": 1.2e-05,
"loss": 1.0878,
"step": 24
},
{
"epoch": 1.3298969072164948,
"grad_norm": 1.3895385265350342,
"learning_rate": 1.25e-05,
"loss": 1.0389,
"step": 25
},
{
"epoch": 1.3848797250859106,
"grad_norm": 1.2851823568344116,
"learning_rate": 1.3000000000000001e-05,
"loss": 1.111,
"step": 26
},
{
"epoch": 1.4398625429553265,
"grad_norm": 1.243282675743103,
"learning_rate": 1.3500000000000001e-05,
"loss": 1.0603,
"step": 27
},
{
"epoch": 1.4948453608247423,
"grad_norm": 1.201363205909729,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.1227,
"step": 28
},
{
"epoch": 1.5498281786941581,
"grad_norm": 0.9723503589630127,
"learning_rate": 1.45e-05,
"loss": 1.0546,
"step": 29
},
{
"epoch": 1.604810996563574,
"grad_norm": 1.017640233039856,
"learning_rate": 1.5e-05,
"loss": 0.9198,
"step": 30
},
{
"epoch": 1.6597938144329896,
"grad_norm": 0.850871205329895,
"learning_rate": 1.55e-05,
"loss": 1.0753,
"step": 31
},
{
"epoch": 1.7147766323024056,
"grad_norm": 0.6708624958992004,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.9494,
"step": 32
},
{
"epoch": 1.7697594501718212,
"grad_norm": 0.7221150994300842,
"learning_rate": 1.65e-05,
"loss": 0.9031,
"step": 33
},
{
"epoch": 1.824742268041237,
"grad_norm": 0.5405407547950745,
"learning_rate": 1.7000000000000003e-05,
"loss": 0.8795,
"step": 34
},
{
"epoch": 1.879725085910653,
"grad_norm": 0.46711477637290955,
"learning_rate": 1.75e-05,
"loss": 0.8542,
"step": 35
},
{
"epoch": 1.9347079037800687,
"grad_norm": 0.49648287892341614,
"learning_rate": 1.8e-05,
"loss": 0.9417,
"step": 36
},
{
"epoch": 1.9896907216494846,
"grad_norm": 0.3698864281177521,
"learning_rate": 1.85e-05,
"loss": 0.884,
"step": 37
},
{
"epoch": 2.0,
"grad_norm": 0.48004987835884094,
"learning_rate": 1.9e-05,
"loss": 0.9747,
"step": 38
},
{
"epoch": 2.0549828178694156,
"grad_norm": 0.31297996640205383,
"learning_rate": 1.9500000000000003e-05,
"loss": 0.8711,
"step": 39
},
{
"epoch": 2.1099656357388317,
"grad_norm": 0.3257434666156769,
"learning_rate": 2e-05,
"loss": 0.9167,
"step": 40
},
{
"epoch": 2.1649484536082473,
"grad_norm": 0.3740560710430145,
"learning_rate": 2.05e-05,
"loss": 0.7789,
"step": 41
},
{
"epoch": 2.2199312714776633,
"grad_norm": 0.26695382595062256,
"learning_rate": 2.1e-05,
"loss": 0.854,
"step": 42
},
{
"epoch": 2.274914089347079,
"grad_norm": 0.2634020447731018,
"learning_rate": 2.15e-05,
"loss": 0.8023,
"step": 43
},
{
"epoch": 2.329896907216495,
"grad_norm": 0.23030242323875427,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.8841,
"step": 44
},
{
"epoch": 2.3848797250859106,
"grad_norm": 0.2185160219669342,
"learning_rate": 2.25e-05,
"loss": 0.7976,
"step": 45
},
{
"epoch": 2.4398625429553267,
"grad_norm": 0.18541128933429718,
"learning_rate": 2.3000000000000003e-05,
"loss": 0.8459,
"step": 46
},
{
"epoch": 2.4948453608247423,
"grad_norm": 0.21860601007938385,
"learning_rate": 2.35e-05,
"loss": 0.8594,
"step": 47
},
{
"epoch": 2.549828178694158,
"grad_norm": 0.16850370168685913,
"learning_rate": 2.4e-05,
"loss": 0.7763,
"step": 48
},
{
"epoch": 2.604810996563574,
"grad_norm": 0.18208689987659454,
"learning_rate": 2.45e-05,
"loss": 0.8106,
"step": 49
},
{
"epoch": 2.6597938144329896,
"grad_norm": 0.1723138839006424,
"learning_rate": 2.5e-05,
"loss": 0.8945,
"step": 50
},
{
"epoch": 2.7147766323024056,
"grad_norm": 0.16593343019485474,
"learning_rate": 2.5500000000000003e-05,
"loss": 0.7774,
"step": 51
},
{
"epoch": 2.7697594501718212,
"grad_norm": 0.19333742558956146,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.764,
"step": 52
},
{
"epoch": 2.824742268041237,
"grad_norm": 0.1437019407749176,
"learning_rate": 2.6500000000000004e-05,
"loss": 0.7429,
"step": 53
},
{
"epoch": 2.879725085910653,
"grad_norm": 0.15184654295444489,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.7774,
"step": 54
},
{
"epoch": 2.934707903780069,
"grad_norm": 0.1588866114616394,
"learning_rate": 2.7500000000000004e-05,
"loss": 0.837,
"step": 55
},
{
"epoch": 2.9896907216494846,
"grad_norm": 0.1829744577407837,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.8212,
"step": 56
},
{
"epoch": 3.0,
"grad_norm": 0.22577908635139465,
"learning_rate": 2.8499999999999998e-05,
"loss": 0.7369,
"step": 57
},
{
"epoch": 3.0,
"step": 57,
"total_flos": 2.0642424594366464e+16,
"train_loss": 1.0249844183001602,
"train_runtime": 35555.0368,
"train_samples_per_second": 0.098,
"train_steps_per_second": 0.002
}
],
"logging_steps": 1.0,
"max_steps": 57,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.0642424594366464e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}