{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1365,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002197802197802198,
"grad_norm": 0.2409743368625641,
"learning_rate": 2.18978102189781e-05,
"loss": 3.4631,
"step": 1
},
{
"epoch": 0.004395604395604396,
"grad_norm": 0.1447215974330902,
"learning_rate": 4.37956204379562e-05,
"loss": 4.8401,
"step": 2
},
{
"epoch": 0.006593406593406593,
"grad_norm": 0.26040589809417725,
"learning_rate": 6.569343065693432e-05,
"loss": 3.7198,
"step": 3
},
{
"epoch": 0.008791208791208791,
"grad_norm": 0.16445273160934448,
"learning_rate": 8.75912408759124e-05,
"loss": 3.9189,
"step": 4
},
{
"epoch": 0.01098901098901099,
"grad_norm": 0.27399495244026184,
"learning_rate": 0.00010948905109489051,
"loss": 4.0539,
"step": 5
},
{
"epoch": 0.013186813186813187,
"grad_norm": 0.16151826083660126,
"learning_rate": 0.00013138686131386864,
"loss": 4.4712,
"step": 6
},
{
"epoch": 0.015384615384615385,
"grad_norm": 0.26531946659088135,
"learning_rate": 0.00015328467153284672,
"loss": 3.1886,
"step": 7
},
{
"epoch": 0.017582417582417582,
"grad_norm": 0.25629737973213196,
"learning_rate": 0.0001751824817518248,
"loss": 3.5124,
"step": 8
},
{
"epoch": 0.01978021978021978,
"grad_norm": 0.17527121305465698,
"learning_rate": 0.00019708029197080293,
"loss": 3.3012,
"step": 9
},
{
"epoch": 0.02197802197802198,
"grad_norm": 0.19209985435009003,
"learning_rate": 0.00021897810218978101,
"loss": 4.0112,
"step": 10
},
{
"epoch": 0.024175824175824177,
"grad_norm": 0.2481723576784134,
"learning_rate": 0.00024087591240875913,
"loss": 3.3845,
"step": 11
},
{
"epoch": 0.026373626373626374,
"grad_norm": 0.20719580352306366,
"learning_rate": 0.00026277372262773727,
"loss": 4.3905,
"step": 12
},
{
"epoch": 0.02857142857142857,
"grad_norm": 0.21076622605323792,
"learning_rate": 0.00028467153284671533,
"loss": 3.3939,
"step": 13
},
{
"epoch": 0.03076923076923077,
"grad_norm": 0.18189063668251038,
"learning_rate": 0.00030656934306569345,
"loss": 4.2718,
"step": 14
},
{
"epoch": 0.03296703296703297,
"grad_norm": 0.15384311974048615,
"learning_rate": 0.00032846715328467156,
"loss": 3.9417,
"step": 15
},
{
"epoch": 0.035164835164835165,
"grad_norm": 0.08791644126176834,
"learning_rate": 0.0003503649635036496,
"loss": 4.1379,
"step": 16
},
{
"epoch": 0.03736263736263736,
"grad_norm": 0.17861421406269073,
"learning_rate": 0.00037226277372262774,
"loss": 3.7677,
"step": 17
},
{
"epoch": 0.03956043956043956,
"grad_norm": 0.2643413841724396,
"learning_rate": 0.00039416058394160585,
"loss": 4.7121,
"step": 18
},
{
"epoch": 0.041758241758241756,
"grad_norm": 0.12710094451904297,
"learning_rate": 0.00041605839416058397,
"loss": 4.4979,
"step": 19
},
{
"epoch": 0.04395604395604396,
"grad_norm": 0.14025244116783142,
"learning_rate": 0.00043795620437956203,
"loss": 3.7954,
"step": 20
},
{
"epoch": 0.046153846153846156,
"grad_norm": 0.23778130114078522,
"learning_rate": 0.0004598540145985402,
"loss": 4.1339,
"step": 21
},
{
"epoch": 0.04835164835164835,
"grad_norm": 0.2965976893901825,
"learning_rate": 0.00048175182481751826,
"loss": 3.5797,
"step": 22
},
{
"epoch": 0.05054945054945055,
"grad_norm": 0.2229406088590622,
"learning_rate": 0.0005036496350364964,
"loss": 4.5147,
"step": 23
},
{
"epoch": 0.05274725274725275,
"grad_norm": 0.186275452375412,
"learning_rate": 0.0005255474452554745,
"loss": 4.1551,
"step": 24
},
{
"epoch": 0.054945054945054944,
"grad_norm": 0.20020753145217896,
"learning_rate": 0.0005474452554744526,
"loss": 3.7933,
"step": 25
},
{
"epoch": 0.05714285714285714,
"grad_norm": 0.1915745884180069,
"learning_rate": 0.0005693430656934307,
"loss": 3.6029,
"step": 26
},
{
"epoch": 0.05934065934065934,
"grad_norm": 0.17862044274806976,
"learning_rate": 0.0005912408759124088,
"loss": 4.294,
"step": 27
},
{
"epoch": 0.06153846153846154,
"grad_norm": 0.21365594863891602,
"learning_rate": 0.0006131386861313869,
"loss": 4.1469,
"step": 28
},
{
"epoch": 0.06373626373626373,
"grad_norm": 0.10970776528120041,
"learning_rate": 0.000635036496350365,
"loss": 3.8588,
"step": 29
},
{
"epoch": 0.06593406593406594,
"grad_norm": 0.31967630982398987,
"learning_rate": 0.0006569343065693431,
"loss": 3.9553,
"step": 30
},
{
"epoch": 0.06813186813186813,
"grad_norm": 0.25783461332321167,
"learning_rate": 0.0006788321167883212,
"loss": 3.8109,
"step": 31
},
{
"epoch": 0.07032967032967033,
"grad_norm": 0.16867470741271973,
"learning_rate": 0.0007007299270072992,
"loss": 3.6247,
"step": 32
},
{
"epoch": 0.07252747252747253,
"grad_norm": 0.2316632866859436,
"learning_rate": 0.0007226277372262774,
"loss": 4.2493,
"step": 33
},
{
"epoch": 0.07472527472527472,
"grad_norm": 0.189302459359169,
"learning_rate": 0.0007445255474452555,
"loss": 4.1026,
"step": 34
},
{
"epoch": 0.07692307692307693,
"grad_norm": 0.34032320976257324,
"learning_rate": 0.0007664233576642336,
"loss": 3.6458,
"step": 35
},
{
"epoch": 0.07912087912087912,
"grad_norm": 0.2380828857421875,
"learning_rate": 0.0007883211678832117,
"loss": 3.786,
"step": 36
},
{
"epoch": 0.08131868131868132,
"grad_norm": 0.27829498052597046,
"learning_rate": 0.0008102189781021898,
"loss": 3.1697,
"step": 37
},
{
"epoch": 0.08351648351648351,
"grad_norm": 0.3624386489391327,
"learning_rate": 0.0008321167883211679,
"loss": 3.0715,
"step": 38
},
{
"epoch": 0.08571428571428572,
"grad_norm": 0.20223893225193024,
"learning_rate": 0.000854014598540146,
"loss": 2.8427,
"step": 39
},
{
"epoch": 0.08791208791208792,
"grad_norm": 0.23891288042068481,
"learning_rate": 0.0008759124087591241,
"loss": 3.8387,
"step": 40
},
{
"epoch": 0.09010989010989011,
"grad_norm": 0.18573692440986633,
"learning_rate": 0.0008978102189781023,
"loss": 4.0587,
"step": 41
},
{
"epoch": 0.09230769230769231,
"grad_norm": 0.2222813367843628,
"learning_rate": 0.0009197080291970804,
"loss": 3.6221,
"step": 42
},
{
"epoch": 0.0945054945054945,
"grad_norm": 0.16038869321346283,
"learning_rate": 0.0009416058394160585,
"loss": 4.5041,
"step": 43
},
{
"epoch": 0.0967032967032967,
"grad_norm": 0.23238258063793182,
"learning_rate": 0.0009635036496350365,
"loss": 3.1737,
"step": 44
},
{
"epoch": 0.0989010989010989,
"grad_norm": 0.28006768226623535,
"learning_rate": 0.0009854014598540146,
"loss": 3.8228,
"step": 45
},
{
"epoch": 0.1010989010989011,
"grad_norm": 0.3365144729614258,
"learning_rate": 0.0010072992700729927,
"loss": 3.3732,
"step": 46
},
{
"epoch": 0.10329670329670329,
"grad_norm": 0.4002966284751892,
"learning_rate": 0.001029197080291971,
"loss": 3.6661,
"step": 47
},
{
"epoch": 0.1054945054945055,
"grad_norm": 0.23499278724193573,
"learning_rate": 0.001051094890510949,
"loss": 4.2698,
"step": 48
},
{
"epoch": 0.1076923076923077,
"grad_norm": 0.24968664348125458,
"learning_rate": 0.001072992700729927,
"loss": 4.1431,
"step": 49
},
{
"epoch": 0.10989010989010989,
"grad_norm": 0.22199974954128265,
"learning_rate": 0.0010948905109489052,
"loss": 3.5574,
"step": 50
},
{
"epoch": 0.11208791208791209,
"grad_norm": 0.2998521625995636,
"learning_rate": 0.0011167883211678832,
"loss": 3.4442,
"step": 51
},
{
"epoch": 0.11428571428571428,
"grad_norm": 0.24236860871315002,
"learning_rate": 0.0011386861313868613,
"loss": 4.0275,
"step": 52
},
{
"epoch": 0.11648351648351649,
"grad_norm": 0.2523564398288727,
"learning_rate": 0.0011605839416058395,
"loss": 4.1493,
"step": 53
},
{
"epoch": 0.11868131868131868,
"grad_norm": 0.25490981340408325,
"learning_rate": 0.0011824817518248177,
"loss": 3.9811,
"step": 54
},
{
"epoch": 0.12087912087912088,
"grad_norm": 0.4431103467941284,
"learning_rate": 0.0012043795620437956,
"loss": 3.6094,
"step": 55
},
{
"epoch": 0.12307692307692308,
"grad_norm": 0.3580457270145416,
"learning_rate": 0.0012262773722627738,
"loss": 3.3008,
"step": 56
},
{
"epoch": 0.12527472527472527,
"grad_norm": 0.3382434546947479,
"learning_rate": 0.0012481751824817517,
"loss": 3.4361,
"step": 57
},
{
"epoch": 0.12747252747252746,
"grad_norm": 0.17469391226768494,
"learning_rate": 0.00127007299270073,
"loss": 3.0908,
"step": 58
},
{
"epoch": 0.12967032967032968,
"grad_norm": 0.18732449412345886,
"learning_rate": 0.001291970802919708,
"loss": 3.4199,
"step": 59
},
{
"epoch": 0.13186813186813187,
"grad_norm": 0.27854177355766296,
"learning_rate": 0.0013138686131386862,
"loss": 3.8876,
"step": 60
},
{
"epoch": 0.13406593406593406,
"grad_norm": 0.23652972280979156,
"learning_rate": 0.0013357664233576642,
"loss": 3.1812,
"step": 61
},
{
"epoch": 0.13626373626373625,
"grad_norm": 0.2868683636188507,
"learning_rate": 0.0013576642335766424,
"loss": 3.9795,
"step": 62
},
{
"epoch": 0.13846153846153847,
"grad_norm": 0.2507357597351074,
"learning_rate": 0.0013795620437956205,
"loss": 3.3743,
"step": 63
},
{
"epoch": 0.14065934065934066,
"grad_norm": 0.1464177519083023,
"learning_rate": 0.0014014598540145985,
"loss": 3.7366,
"step": 64
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.3048521876335144,
"learning_rate": 0.0014233576642335767,
"loss": 3.3164,
"step": 65
},
{
"epoch": 0.14505494505494507,
"grad_norm": 0.2725769281387329,
"learning_rate": 0.0014452554744525548,
"loss": 3.1478,
"step": 66
},
{
"epoch": 0.14725274725274726,
"grad_norm": 0.19293050467967987,
"learning_rate": 0.001467153284671533,
"loss": 3.8087,
"step": 67
},
{
"epoch": 0.14945054945054945,
"grad_norm": 0.22903995215892792,
"learning_rate": 0.001489051094890511,
"loss": 2.9895,
"step": 68
},
{
"epoch": 0.15164835164835164,
"grad_norm": 0.20448172092437744,
"learning_rate": 0.0015109489051094893,
"loss": 2.8988,
"step": 69
},
{
"epoch": 0.15384615384615385,
"grad_norm": 0.2247956395149231,
"learning_rate": 0.0015328467153284673,
"loss": 3.5537,
"step": 70
},
{
"epoch": 0.15604395604395604,
"grad_norm": 0.3673924207687378,
"learning_rate": 0.0015547445255474455,
"loss": 3.2633,
"step": 71
},
{
"epoch": 0.15824175824175823,
"grad_norm": 0.3656448721885681,
"learning_rate": 0.0015766423357664234,
"loss": 3.5671,
"step": 72
},
{
"epoch": 0.16043956043956045,
"grad_norm": 0.2745870053768158,
"learning_rate": 0.0015985401459854016,
"loss": 3.2916,
"step": 73
},
{
"epoch": 0.16263736263736264,
"grad_norm": 0.20720677077770233,
"learning_rate": 0.0016204379562043795,
"loss": 3.9967,
"step": 74
},
{
"epoch": 0.16483516483516483,
"grad_norm": 0.22743763029575348,
"learning_rate": 0.0016423357664233577,
"loss": 2.5638,
"step": 75
},
{
"epoch": 0.16703296703296702,
"grad_norm": 0.1443316787481308,
"learning_rate": 0.0016642335766423359,
"loss": 3.0965,
"step": 76
},
{
"epoch": 0.16923076923076924,
"grad_norm": 0.29204776883125305,
"learning_rate": 0.0016861313868613138,
"loss": 2.7744,
"step": 77
},
{
"epoch": 0.17142857142857143,
"grad_norm": 0.4143708050251007,
"learning_rate": 0.001708029197080292,
"loss": 3.1684,
"step": 78
},
{
"epoch": 0.17362637362637362,
"grad_norm": 0.2691643536090851,
"learning_rate": 0.00172992700729927,
"loss": 3.4879,
"step": 79
},
{
"epoch": 0.17582417582417584,
"grad_norm": 0.13457076251506805,
"learning_rate": 0.0017518248175182481,
"loss": 3.4612,
"step": 80
},
{
"epoch": 0.17802197802197803,
"grad_norm": 0.22509483993053436,
"learning_rate": 0.0017737226277372265,
"loss": 3.0259,
"step": 81
},
{
"epoch": 0.18021978021978022,
"grad_norm": 0.20218557119369507,
"learning_rate": 0.0017956204379562047,
"loss": 3.8595,
"step": 82
},
{
"epoch": 0.1824175824175824,
"grad_norm": 0.21320748329162598,
"learning_rate": 0.0018175182481751826,
"loss": 3.5927,
"step": 83
},
{
"epoch": 0.18461538461538463,
"grad_norm": 0.08175591379404068,
"learning_rate": 0.0018394160583941608,
"loss": 2.8849,
"step": 84
},
{
"epoch": 0.18681318681318682,
"grad_norm": 0.14934630692005157,
"learning_rate": 0.0018613138686131387,
"loss": 2.5294,
"step": 85
},
{
"epoch": 0.189010989010989,
"grad_norm": 0.24337878823280334,
"learning_rate": 0.001883211678832117,
"loss": 2.4752,
"step": 86
},
{
"epoch": 0.1912087912087912,
"grad_norm": 0.24200224876403809,
"learning_rate": 0.0019051094890510949,
"loss": 3.6156,
"step": 87
},
{
"epoch": 0.1934065934065934,
"grad_norm": 0.26793813705444336,
"learning_rate": 0.001927007299270073,
"loss": 2.6654,
"step": 88
},
{
"epoch": 0.1956043956043956,
"grad_norm": 0.12029215693473816,
"learning_rate": 0.001948905109489051,
"loss": 2.7685,
"step": 89
},
{
"epoch": 0.1978021978021978,
"grad_norm": 0.15729354321956635,
"learning_rate": 0.001970802919708029,
"loss": 2.2802,
"step": 90
},
{
"epoch": 0.2,
"grad_norm": 0.34418052434921265,
"learning_rate": 0.0019927007299270073,
"loss": 2.6971,
"step": 91
},
{
"epoch": 0.2021978021978022,
"grad_norm": 0.06955726444721222,
"learning_rate": 0.0020145985401459855,
"loss": 2.3565,
"step": 92
},
{
"epoch": 0.2043956043956044,
"grad_norm": 0.22171291708946228,
"learning_rate": 0.0020364963503649632,
"loss": 2.848,
"step": 93
},
{
"epoch": 0.20659340659340658,
"grad_norm": 0.22627265751361847,
"learning_rate": 0.002058394160583942,
"loss": 2.6717,
"step": 94
},
{
"epoch": 0.2087912087912088,
"grad_norm": 0.06811092793941498,
"learning_rate": 0.00208029197080292,
"loss": 2.4789,
"step": 95
},
{
"epoch": 0.210989010989011,
"grad_norm": 0.04275379702448845,
"learning_rate": 0.002102189781021898,
"loss": 2.5631,
"step": 96
},
{
"epoch": 0.21318681318681318,
"grad_norm": 0.14114683866500854,
"learning_rate": 0.002124087591240876,
"loss": 3.0502,
"step": 97
},
{
"epoch": 0.2153846153846154,
"grad_norm": 0.09132061153650284,
"learning_rate": 0.002145985401459854,
"loss": 3.1342,
"step": 98
},
{
"epoch": 0.2175824175824176,
"grad_norm": 0.08865886181592941,
"learning_rate": 0.0021678832116788322,
"loss": 2.9534,
"step": 99
},
{
"epoch": 0.21978021978021978,
"grad_norm": 0.025824734941124916,
"learning_rate": 0.0021897810218978104,
"loss": 3.2902,
"step": 100
},
{
"epoch": 0.22197802197802197,
"grad_norm": 0.1252056509256363,
"learning_rate": 0.002211678832116788,
"loss": 3.134,
"step": 101
},
{
"epoch": 0.22417582417582418,
"grad_norm": 0.3606067895889282,
"learning_rate": 0.0022335766423357663,
"loss": 3.1547,
"step": 102
},
{
"epoch": 0.22637362637362637,
"grad_norm": 0.07673951983451843,
"learning_rate": 0.0022554744525547445,
"loss": 3.1835,
"step": 103
},
{
"epoch": 0.22857142857142856,
"grad_norm": 0.05820150673389435,
"learning_rate": 0.0022773722627737226,
"loss": 2.7766,
"step": 104
},
{
"epoch": 0.23076923076923078,
"grad_norm": 0.08374207466840744,
"learning_rate": 0.002299270072992701,
"loss": 3.1881,
"step": 105
},
{
"epoch": 0.23296703296703297,
"grad_norm": 0.10021543502807617,
"learning_rate": 0.002321167883211679,
"loss": 2.8313,
"step": 106
},
{
"epoch": 0.23516483516483516,
"grad_norm": 0.02826845273375511,
"learning_rate": 0.002343065693430657,
"loss": 2.7479,
"step": 107
},
{
"epoch": 0.23736263736263735,
"grad_norm": 0.11248984932899475,
"learning_rate": 0.0023649635036496353,
"loss": 2.7949,
"step": 108
},
{
"epoch": 0.23956043956043957,
"grad_norm": 0.02967158518731594,
"learning_rate": 0.002386861313868613,
"loss": 2.9299,
"step": 109
},
{
"epoch": 0.24175824175824176,
"grad_norm": 0.10870092362165451,
"learning_rate": 0.0024087591240875912,
"loss": 2.5918,
"step": 110
},
{
"epoch": 0.24395604395604395,
"grad_norm": 0.12841796875,
"learning_rate": 0.0024306569343065694,
"loss": 2.4288,
"step": 111
},
{
"epoch": 0.24615384615384617,
"grad_norm": 0.1173066645860672,
"learning_rate": 0.0024525547445255476,
"loss": 2.4999,
"step": 112
},
{
"epoch": 0.24835164835164836,
"grad_norm": 0.059498704969882965,
"learning_rate": 0.0024744525547445257,
"loss": 2.8756,
"step": 113
},
{
"epoch": 0.25054945054945055,
"grad_norm": 0.06436172127723694,
"learning_rate": 0.0024963503649635035,
"loss": 2.6059,
"step": 114
},
{
"epoch": 0.25274725274725274,
"grad_norm": 0.03747984766960144,
"learning_rate": 0.0025182481751824816,
"loss": 2.2826,
"step": 115
},
{
"epoch": 0.2549450549450549,
"grad_norm": 0.18285073339939117,
"learning_rate": 0.00254014598540146,
"loss": 2.846,
"step": 116
},
{
"epoch": 0.2571428571428571,
"grad_norm": 0.04132774844765663,
"learning_rate": 0.002562043795620438,
"loss": 2.2563,
"step": 117
},
{
"epoch": 0.25934065934065936,
"grad_norm": 0.06792833656072617,
"learning_rate": 0.002583941605839416,
"loss": 2.6874,
"step": 118
},
{
"epoch": 0.26153846153846155,
"grad_norm": 0.06437716633081436,
"learning_rate": 0.0026058394160583943,
"loss": 2.6496,
"step": 119
},
{
"epoch": 0.26373626373626374,
"grad_norm": 0.02958463318645954,
"learning_rate": 0.0026277372262773725,
"loss": 2.6508,
"step": 120
},
{
"epoch": 0.26593406593406593,
"grad_norm": 0.027047540992498398,
"learning_rate": 0.0026496350364963507,
"loss": 2.9091,
"step": 121
},
{
"epoch": 0.2681318681318681,
"grad_norm": 0.022563157603144646,
"learning_rate": 0.0026715328467153284,
"loss": 2.3619,
"step": 122
},
{
"epoch": 0.2703296703296703,
"grad_norm": 0.06798495352268219,
"learning_rate": 0.0026934306569343066,
"loss": 2.7303,
"step": 123
},
{
"epoch": 0.2725274725274725,
"grad_norm": 0.02221810631453991,
"learning_rate": 0.0027153284671532847,
"loss": 2.3779,
"step": 124
},
{
"epoch": 0.27472527472527475,
"grad_norm": 0.05690629407763481,
"learning_rate": 0.002737226277372263,
"loss": 2.7081,
"step": 125
},
{
"epoch": 0.27692307692307694,
"grad_norm": 0.04338756948709488,
"learning_rate": 0.002759124087591241,
"loss": 3.0975,
"step": 126
},
{
"epoch": 0.27912087912087913,
"grad_norm": 0.0469847097992897,
"learning_rate": 0.002781021897810219,
"loss": 2.6757,
"step": 127
},
{
"epoch": 0.2813186813186813,
"grad_norm": 0.0227196142077446,
"learning_rate": 0.002802919708029197,
"loss": 2.8319,
"step": 128
},
{
"epoch": 0.2835164835164835,
"grad_norm": 0.7043052315711975,
"learning_rate": 0.002824817518248175,
"loss": 3.139,
"step": 129
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.055594831705093384,
"learning_rate": 0.0028467153284671533,
"loss": 2.5613,
"step": 130
},
{
"epoch": 0.2879120879120879,
"grad_norm": 0.08255117386579514,
"learning_rate": 0.0028686131386861315,
"loss": 2.9341,
"step": 131
},
{
"epoch": 0.29010989010989013,
"grad_norm": 0.023566026240587234,
"learning_rate": 0.0028905109489051097,
"loss": 2.0752,
"step": 132
},
{
"epoch": 0.2923076923076923,
"grad_norm": 0.033662788569927216,
"learning_rate": 0.002912408759124088,
"loss": 2.8736,
"step": 133
},
{
"epoch": 0.2945054945054945,
"grad_norm": 0.020084552466869354,
"learning_rate": 0.002934306569343066,
"loss": 3.4213,
"step": 134
},
{
"epoch": 0.2967032967032967,
"grad_norm": 0.03389180079102516,
"learning_rate": 0.0029562043795620437,
"loss": 2.8929,
"step": 135
},
{
"epoch": 0.2989010989010989,
"grad_norm": 0.03489179164171219,
"learning_rate": 0.002978102189781022,
"loss": 2.3576,
"step": 136
},
{
"epoch": 0.3010989010989011,
"grad_norm": 0.2881184220314026,
"learning_rate": 0.003,
"loss": 2.4049,
"step": 137
},
{
"epoch": 0.3032967032967033,
"grad_norm": 0.0471118688583374,
"learning_rate": 0.002997557003257329,
"loss": 2.873,
"step": 138
},
{
"epoch": 0.3054945054945055,
"grad_norm": 0.03607434779405594,
"learning_rate": 0.002995114006514658,
"loss": 1.7149,
"step": 139
},
{
"epoch": 0.3076923076923077,
"grad_norm": 0.028327681124210358,
"learning_rate": 0.002992671009771987,
"loss": 2.8203,
"step": 140
},
{
"epoch": 0.3098901098901099,
"grad_norm": 0.01758401468396187,
"learning_rate": 0.002990228013029316,
"loss": 2.6127,
"step": 141
},
{
"epoch": 0.3120879120879121,
"grad_norm": 0.019626569002866745,
"learning_rate": 0.002987785016286645,
"loss": 2.5646,
"step": 142
},
{
"epoch": 0.3142857142857143,
"grad_norm": 0.10692066699266434,
"learning_rate": 0.002985342019543974,
"loss": 2.9253,
"step": 143
},
{
"epoch": 0.31648351648351647,
"grad_norm": 0.017350753769278526,
"learning_rate": 0.002982899022801303,
"loss": 2.6919,
"step": 144
},
{
"epoch": 0.31868131868131866,
"grad_norm": 0.0960436537861824,
"learning_rate": 0.002980456026058632,
"loss": 3.3178,
"step": 145
},
{
"epoch": 0.3208791208791209,
"grad_norm": 0.030203109607100487,
"learning_rate": 0.002978013029315961,
"loss": 3.583,
"step": 146
},
{
"epoch": 0.3230769230769231,
"grad_norm": 0.020341740921139717,
"learning_rate": 0.00297557003257329,
"loss": 2.8786,
"step": 147
},
{
"epoch": 0.3252747252747253,
"grad_norm": 0.03974943235516548,
"learning_rate": 0.0029731270358306187,
"loss": 2.9432,
"step": 148
},
{
"epoch": 0.3274725274725275,
"grad_norm": 0.07958322018384933,
"learning_rate": 0.002970684039087948,
"loss": 2.6741,
"step": 149
},
{
"epoch": 0.32967032967032966,
"grad_norm": 0.04146917909383774,
"learning_rate": 0.002968241042345277,
"loss": 1.997,
"step": 150
},
{
"epoch": 0.33186813186813185,
"grad_norm": 0.023728644475340843,
"learning_rate": 0.002965798045602606,
"loss": 3.3135,
"step": 151
},
{
"epoch": 0.33406593406593404,
"grad_norm": 0.041204117238521576,
"learning_rate": 0.0029633550488599347,
"loss": 2.6001,
"step": 152
},
{
"epoch": 0.3362637362637363,
"grad_norm": 0.041832081973552704,
"learning_rate": 0.002960912052117264,
"loss": 3.0042,
"step": 153
},
{
"epoch": 0.3384615384615385,
"grad_norm": 0.03879597410559654,
"learning_rate": 0.002958469055374593,
"loss": 3.2596,
"step": 154
},
{
"epoch": 0.34065934065934067,
"grad_norm": 0.054785918444395065,
"learning_rate": 0.002956026058631922,
"loss": 2.713,
"step": 155
},
{
"epoch": 0.34285714285714286,
"grad_norm": 0.19453951716423035,
"learning_rate": 0.0029535830618892507,
"loss": 2.9748,
"step": 156
},
{
"epoch": 0.34505494505494505,
"grad_norm": 0.0177962314337492,
"learning_rate": 0.0029511400651465796,
"loss": 2.4791,
"step": 157
},
{
"epoch": 0.34725274725274724,
"grad_norm": 0.029887791723012924,
"learning_rate": 0.002948697068403909,
"loss": 3.3669,
"step": 158
},
{
"epoch": 0.34945054945054943,
"grad_norm": 0.22532090544700623,
"learning_rate": 0.002946254071661238,
"loss": 1.9057,
"step": 159
},
{
"epoch": 0.3516483516483517,
"grad_norm": 0.04401366412639618,
"learning_rate": 0.0029438110749185667,
"loss": 3.2245,
"step": 160
},
{
"epoch": 0.35384615384615387,
"grad_norm": 0.015324101783335209,
"learning_rate": 0.0029413680781758956,
"loss": 2.945,
"step": 161
},
{
"epoch": 0.35604395604395606,
"grad_norm": 0.015329068526625633,
"learning_rate": 0.002938925081433225,
"loss": 2.4361,
"step": 162
},
{
"epoch": 0.35824175824175825,
"grad_norm": 0.036454055458307266,
"learning_rate": 0.002936482084690554,
"loss": 2.3751,
"step": 163
},
{
"epoch": 0.36043956043956044,
"grad_norm": 0.040581539273262024,
"learning_rate": 0.0029340390879478827,
"loss": 2.6556,
"step": 164
},
{
"epoch": 0.3626373626373626,
"grad_norm": 0.023130884394049644,
"learning_rate": 0.0029315960912052116,
"loss": 2.912,
"step": 165
},
{
"epoch": 0.3648351648351648,
"grad_norm": 0.05891607701778412,
"learning_rate": 0.0029291530944625405,
"loss": 2.9318,
"step": 166
},
{
"epoch": 0.367032967032967,
"grad_norm": 0.03306470438838005,
"learning_rate": 0.00292671009771987,
"loss": 2.3573,
"step": 167
},
{
"epoch": 0.36923076923076925,
"grad_norm": 0.03784559294581413,
"learning_rate": 0.0029242671009771988,
"loss": 3.0028,
"step": 168
},
{
"epoch": 0.37142857142857144,
"grad_norm": 0.05403726547956467,
"learning_rate": 0.0029218241042345276,
"loss": 2.9553,
"step": 169
},
{
"epoch": 0.37362637362637363,
"grad_norm": 0.03373740613460541,
"learning_rate": 0.0029193811074918565,
"loss": 2.5937,
"step": 170
},
{
"epoch": 0.3758241758241758,
"grad_norm": 0.1475343108177185,
"learning_rate": 0.002916938110749186,
"loss": 2.5746,
"step": 171
},
{
"epoch": 0.378021978021978,
"grad_norm": 0.023519936949014664,
"learning_rate": 0.0029144951140065148,
"loss": 2.604,
"step": 172
},
{
"epoch": 0.3802197802197802,
"grad_norm": 0.06090182811021805,
"learning_rate": 0.0029120521172638436,
"loss": 2.5156,
"step": 173
},
{
"epoch": 0.3824175824175824,
"grad_norm": 0.2844042479991913,
"learning_rate": 0.0029096091205211725,
"loss": 3.578,
"step": 174
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.02431316487491131,
"learning_rate": 0.002907166123778502,
"loss": 2.823,
"step": 175
},
{
"epoch": 0.3868131868131868,
"grad_norm": 0.20494656264781952,
"learning_rate": 0.0029047231270358308,
"loss": 2.9553,
"step": 176
},
{
"epoch": 0.389010989010989,
"grad_norm": 0.019358966499567032,
"learning_rate": 0.0029022801302931596,
"loss": 2.9102,
"step": 177
},
{
"epoch": 0.3912087912087912,
"grad_norm": 0.068114273250103,
"learning_rate": 0.0028998371335504885,
"loss": 3.1654,
"step": 178
},
{
"epoch": 0.3934065934065934,
"grad_norm": 0.017963459715247154,
"learning_rate": 0.0028973941368078174,
"loss": 2.6325,
"step": 179
},
{
"epoch": 0.3956043956043956,
"grad_norm": 0.10933779180049896,
"learning_rate": 0.0028949511400651468,
"loss": 2.305,
"step": 180
},
{
"epoch": 0.3978021978021978,
"grad_norm": 0.11619806289672852,
"learning_rate": 0.0028925081433224756,
"loss": 2.6951,
"step": 181
},
{
"epoch": 0.4,
"grad_norm": 0.026240166276693344,
"learning_rate": 0.0028900651465798045,
"loss": 3.232,
"step": 182
},
{
"epoch": 0.4021978021978022,
"grad_norm": 0.053106650710105896,
"learning_rate": 0.0028876221498371334,
"loss": 2.4864,
"step": 183
},
{
"epoch": 0.4043956043956044,
"grad_norm": 0.03956475853919983,
"learning_rate": 0.0028851791530944628,
"loss": 2.9587,
"step": 184
},
{
"epoch": 0.4065934065934066,
"grad_norm": 0.20161324739456177,
"learning_rate": 0.0028827361563517916,
"loss": 3.0948,
"step": 185
},
{
"epoch": 0.4087912087912088,
"grad_norm": 0.02601202391088009,
"learning_rate": 0.0028802931596091205,
"loss": 2.7829,
"step": 186
},
{
"epoch": 0.41098901098901097,
"grad_norm": 0.02373082935810089,
"learning_rate": 0.0028778501628664494,
"loss": 2.4965,
"step": 187
},
{
"epoch": 0.41318681318681316,
"grad_norm": 0.020534301176667213,
"learning_rate": 0.0028754071661237783,
"loss": 2.8745,
"step": 188
},
{
"epoch": 0.4153846153846154,
"grad_norm": 0.02831609733402729,
"learning_rate": 0.0028729641693811077,
"loss": 2.358,
"step": 189
},
{
"epoch": 0.4175824175824176,
"grad_norm": 0.033399827778339386,
"learning_rate": 0.0028705211726384365,
"loss": 2.9683,
"step": 190
},
{
"epoch": 0.4197802197802198,
"grad_norm": 0.2932700514793396,
"learning_rate": 0.0028680781758957654,
"loss": 2.9164,
"step": 191
},
{
"epoch": 0.421978021978022,
"grad_norm": 0.029433993622660637,
"learning_rate": 0.0028656351791530943,
"loss": 3.0349,
"step": 192
},
{
"epoch": 0.42417582417582417,
"grad_norm": 0.11126076430082321,
"learning_rate": 0.0028631921824104237,
"loss": 3.2494,
"step": 193
},
{
"epoch": 0.42637362637362636,
"grad_norm": 0.021345842629671097,
"learning_rate": 0.0028607491856677525,
"loss": 3.6874,
"step": 194
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.023012064397335052,
"learning_rate": 0.0028583061889250814,
"loss": 2.8887,
"step": 195
},
{
"epoch": 0.4307692307692308,
"grad_norm": 0.08672553300857544,
"learning_rate": 0.0028558631921824103,
"loss": 3.1926,
"step": 196
},
{
"epoch": 0.432967032967033,
"grad_norm": 0.03434199467301369,
"learning_rate": 0.0028534201954397392,
"loss": 3.4274,
"step": 197
},
{
"epoch": 0.4351648351648352,
"grad_norm": 0.01159977912902832,
"learning_rate": 0.0028509771986970685,
"loss": 2.5823,
"step": 198
},
{
"epoch": 0.43736263736263736,
"grad_norm": 0.024322574958205223,
"learning_rate": 0.0028485342019543974,
"loss": 3.0971,
"step": 199
},
{
"epoch": 0.43956043956043955,
"grad_norm": 0.04011755436658859,
"learning_rate": 0.0028460912052117263,
"loss": 2.7002,
"step": 200
},
{
"epoch": 0.44175824175824174,
"grad_norm": 0.04182285815477371,
"learning_rate": 0.0028436482084690552,
"loss": 2.7995,
"step": 201
},
{
"epoch": 0.44395604395604393,
"grad_norm": 0.1035466268658638,
"learning_rate": 0.0028412052117263845,
"loss": 2.7812,
"step": 202
},
{
"epoch": 0.4461538461538462,
"grad_norm": 0.03022894449532032,
"learning_rate": 0.0028387622149837134,
"loss": 2.5728,
"step": 203
},
{
"epoch": 0.44835164835164837,
"grad_norm": 0.01947683095932007,
"learning_rate": 0.0028363192182410423,
"loss": 2.5337,
"step": 204
},
{
"epoch": 0.45054945054945056,
"grad_norm": 0.0220695398747921,
"learning_rate": 0.0028338762214983712,
"loss": 2.2895,
"step": 205
},
{
"epoch": 0.45274725274725275,
"grad_norm": 0.024109691381454468,
"learning_rate": 0.0028314332247557,
"loss": 2.5012,
"step": 206
},
{
"epoch": 0.45494505494505494,
"grad_norm": 0.2180580347776413,
"learning_rate": 0.0028289902280130294,
"loss": 2.6988,
"step": 207
},
{
"epoch": 0.45714285714285713,
"grad_norm": 0.01809028349816799,
"learning_rate": 0.0028265472312703583,
"loss": 2.563,
"step": 208
},
{
"epoch": 0.4593406593406593,
"grad_norm": 0.015351408161222935,
"learning_rate": 0.0028241042345276872,
"loss": 3.0695,
"step": 209
},
{
"epoch": 0.46153846153846156,
"grad_norm": 0.020464500412344933,
"learning_rate": 0.002821661237785016,
"loss": 2.4209,
"step": 210
},
{
"epoch": 0.46373626373626375,
"grad_norm": 0.01762108877301216,
"learning_rate": 0.0028192182410423454,
"loss": 3.6021,
"step": 211
},
{
"epoch": 0.46593406593406594,
"grad_norm": 0.02172766998410225,
"learning_rate": 0.0028167752442996743,
"loss": 3.368,
"step": 212
},
{
"epoch": 0.46813186813186813,
"grad_norm": 0.08630727231502533,
"learning_rate": 0.0028143322475570032,
"loss": 2.8901,
"step": 213
},
{
"epoch": 0.4703296703296703,
"grad_norm": 0.0335206612944603,
"learning_rate": 0.002811889250814332,
"loss": 2.8626,
"step": 214
},
{
"epoch": 0.4725274725274725,
"grad_norm": 0.031164107844233513,
"learning_rate": 0.002809446254071661,
"loss": 2.1249,
"step": 215
},
{
"epoch": 0.4747252747252747,
"grad_norm": 0.010478016920387745,
"learning_rate": 0.0028070032573289903,
"loss": 2.109,
"step": 216
},
{
"epoch": 0.47692307692307695,
"grad_norm": 0.04877478629350662,
"learning_rate": 0.0028045602605863192,
"loss": 3.2266,
"step": 217
},
{
"epoch": 0.47912087912087914,
"grad_norm": 0.04495551437139511,
"learning_rate": 0.002802117263843648,
"loss": 2.7422,
"step": 218
},
{
"epoch": 0.48131868131868133,
"grad_norm": 0.012712336145341396,
"learning_rate": 0.002799674267100977,
"loss": 3.0572,
"step": 219
},
{
"epoch": 0.4835164835164835,
"grad_norm": 0.014513728208839893,
"learning_rate": 0.0027972312703583063,
"loss": 2.5498,
"step": 220
},
{
"epoch": 0.4857142857142857,
"grad_norm": 0.029882153496146202,
"learning_rate": 0.0027947882736156352,
"loss": 2.3386,
"step": 221
},
{
"epoch": 0.4879120879120879,
"grad_norm": 0.015184939838945866,
"learning_rate": 0.002792345276872964,
"loss": 3.4125,
"step": 222
},
{
"epoch": 0.4901098901098901,
"grad_norm": 0.017641998827457428,
"learning_rate": 0.002789902280130293,
"loss": 2.8412,
"step": 223
},
{
"epoch": 0.49230769230769234,
"grad_norm": 0.08622776716947556,
"learning_rate": 0.002787459283387622,
"loss": 3.3587,
"step": 224
},
{
"epoch": 0.4945054945054945,
"grad_norm": 0.01725643314421177,
"learning_rate": 0.0027850162866449512,
"loss": 3.0997,
"step": 225
},
{
"epoch": 0.4967032967032967,
"grad_norm": 0.04015418514609337,
"learning_rate": 0.00278257328990228,
"loss": 2.8006,
"step": 226
},
{
"epoch": 0.4989010989010989,
"grad_norm": 0.02601313777267933,
"learning_rate": 0.002780130293159609,
"loss": 2.124,
"step": 227
},
{
"epoch": 0.5010989010989011,
"grad_norm": 0.05065767094492912,
"learning_rate": 0.002777687296416938,
"loss": 2.3722,
"step": 228
},
{
"epoch": 0.5032967032967033,
"grad_norm": 0.01822665147483349,
"learning_rate": 0.0027752442996742672,
"loss": 3.1745,
"step": 229
},
{
"epoch": 0.5054945054945055,
"grad_norm": 0.017541702836751938,
"learning_rate": 0.002772801302931596,
"loss": 2.9303,
"step": 230
},
{
"epoch": 0.5076923076923077,
"grad_norm": 0.02340563014149666,
"learning_rate": 0.002770358306188925,
"loss": 2.5377,
"step": 231
},
{
"epoch": 0.5098901098901099,
"grad_norm": 0.019261302426457405,
"learning_rate": 0.002767915309446254,
"loss": 2.5349,
"step": 232
},
{
"epoch": 0.512087912087912,
"grad_norm": 0.016932491213083267,
"learning_rate": 0.002765472312703583,
"loss": 2.716,
"step": 233
},
{
"epoch": 0.5142857142857142,
"grad_norm": 0.043118786066770554,
"learning_rate": 0.002763029315960912,
"loss": 2.7294,
"step": 234
},
{
"epoch": 0.5164835164835165,
"grad_norm": 0.024091746658086777,
"learning_rate": 0.002760586319218241,
"loss": 2.8986,
"step": 235
},
{
"epoch": 0.5186813186813187,
"grad_norm": 0.0369073748588562,
"learning_rate": 0.00275814332247557,
"loss": 2.5213,
"step": 236
},
{
"epoch": 0.5208791208791209,
"grad_norm": 0.019394991919398308,
"learning_rate": 0.002755700325732899,
"loss": 3.3537,
"step": 237
},
{
"epoch": 0.5230769230769231,
"grad_norm": 0.047848187386989594,
"learning_rate": 0.002753257328990228,
"loss": 2.6109,
"step": 238
},
{
"epoch": 0.5252747252747253,
"grad_norm": 0.018728669732809067,
"learning_rate": 0.002750814332247557,
"loss": 2.5887,
"step": 239
},
{
"epoch": 0.5274725274725275,
"grad_norm": 0.026545386761426926,
"learning_rate": 0.002748371335504886,
"loss": 2.1622,
"step": 240
},
{
"epoch": 0.5296703296703297,
"grad_norm": 0.179373636841774,
"learning_rate": 0.002745928338762215,
"loss": 2.3254,
"step": 241
},
{
"epoch": 0.5318681318681319,
"grad_norm": 0.02221335843205452,
"learning_rate": 0.002743485342019544,
"loss": 2.3591,
"step": 242
},
{
"epoch": 0.5340659340659341,
"grad_norm": 0.023070555180311203,
"learning_rate": 0.002741042345276873,
"loss": 2.4131,
"step": 243
},
{
"epoch": 0.5362637362637362,
"grad_norm": 0.04896058142185211,
"learning_rate": 0.002738599348534202,
"loss": 2.6444,
"step": 244
},
{
"epoch": 0.5384615384615384,
"grad_norm": 0.02032623626291752,
"learning_rate": 0.002736156351791531,
"loss": 2.8966,
"step": 245
},
{
"epoch": 0.5406593406593406,
"grad_norm": 0.09960606694221497,
"learning_rate": 0.0027337133550488597,
"loss": 2.6042,
"step": 246
},
{
"epoch": 0.5428571428571428,
"grad_norm": 0.033577460795640945,
"learning_rate": 0.002731270358306189,
"loss": 2.9269,
"step": 247
},
{
"epoch": 0.545054945054945,
"grad_norm": 0.037454430013895035,
"learning_rate": 0.002728827361563518,
"loss": 3.0617,
"step": 248
},
{
"epoch": 0.5472527472527473,
"grad_norm": 0.027549983933568,
"learning_rate": 0.002726384364820847,
"loss": 2.1732,
"step": 249
},
{
"epoch": 0.5494505494505495,
"grad_norm": 0.07877380400896072,
"learning_rate": 0.0027239413680781757,
"loss": 2.2845,
"step": 250
},
{
"epoch": 0.5516483516483517,
"grad_norm": 0.01758957840502262,
"learning_rate": 0.002721498371335505,
"loss": 2.2879,
"step": 251
},
{
"epoch": 0.5538461538461539,
"grad_norm": 0.01929297484457493,
"learning_rate": 0.002719055374592834,
"loss": 2.4288,
"step": 252
},
{
"epoch": 0.5560439560439561,
"grad_norm": 0.016306472942233086,
"learning_rate": 0.002716612377850163,
"loss": 2.3413,
"step": 253
},
{
"epoch": 0.5582417582417583,
"grad_norm": 0.589772641658783,
"learning_rate": 0.0027141693811074917,
"loss": 2.51,
"step": 254
},
{
"epoch": 0.5604395604395604,
"grad_norm": 0.01618817262351513,
"learning_rate": 0.0027117263843648206,
"loss": 2.1043,
"step": 255
},
{
"epoch": 0.5626373626373626,
"grad_norm": 0.201416477560997,
"learning_rate": 0.00270928338762215,
"loss": 2.3298,
"step": 256
},
{
"epoch": 0.5648351648351648,
"grad_norm": 0.015062879770994186,
"learning_rate": 0.002706840390879479,
"loss": 3.0441,
"step": 257
},
{
"epoch": 0.567032967032967,
"grad_norm": 0.43376973271369934,
"learning_rate": 0.0027043973941368077,
"loss": 2.4653,
"step": 258
},
{
"epoch": 0.5692307692307692,
"grad_norm": 0.02954583615064621,
"learning_rate": 0.0027019543973941366,
"loss": 2.3936,
"step": 259
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.04767719283699989,
"learning_rate": 0.002699511400651466,
"loss": 3.3216,
"step": 260
},
{
"epoch": 0.5736263736263736,
"grad_norm": 0.02140074037015438,
"learning_rate": 0.002697068403908795,
"loss": 2.8209,
"step": 261
},
{
"epoch": 0.5758241758241758,
"grad_norm": 0.015454425476491451,
"learning_rate": 0.0026946254071661237,
"loss": 2.505,
"step": 262
},
{
"epoch": 0.578021978021978,
"grad_norm": 0.16158735752105713,
"learning_rate": 0.0026921824104234526,
"loss": 2.0559,
"step": 263
},
{
"epoch": 0.5802197802197803,
"grad_norm": 0.020917022600769997,
"learning_rate": 0.0026897394136807815,
"loss": 2.2346,
"step": 264
},
{
"epoch": 0.5824175824175825,
"grad_norm": 0.034949660301208496,
"learning_rate": 0.002687296416938111,
"loss": 2.3979,
"step": 265
},
{
"epoch": 0.5846153846153846,
"grad_norm": 0.016119999811053276,
"learning_rate": 0.0026848534201954397,
"loss": 3.029,
"step": 266
},
{
"epoch": 0.5868131868131868,
"grad_norm": 0.23154503107070923,
"learning_rate": 0.0026824104234527686,
"loss": 2.7569,
"step": 267
},
{
"epoch": 0.589010989010989,
"grad_norm": 0.010933191515505314,
"learning_rate": 0.0026799674267100975,
"loss": 1.978,
"step": 268
},
{
"epoch": 0.5912087912087912,
"grad_norm": 0.02948264218866825,
"learning_rate": 0.002677524429967427,
"loss": 2.5674,
"step": 269
},
{
"epoch": 0.5934065934065934,
"grad_norm": 0.06428027153015137,
"learning_rate": 0.0026750814332247557,
"loss": 2.9335,
"step": 270
},
{
"epoch": 0.5956043956043956,
"grad_norm": 0.052733179181814194,
"learning_rate": 0.0026726384364820846,
"loss": 2.3666,
"step": 271
},
{
"epoch": 0.5978021978021978,
"grad_norm": 0.2249625325202942,
"learning_rate": 0.0026701954397394135,
"loss": 2.1986,
"step": 272
},
{
"epoch": 0.6,
"grad_norm": 0.018427258357405663,
"learning_rate": 0.0026677524429967424,
"loss": 2.351,
"step": 273
},
{
"epoch": 0.6021978021978022,
"grad_norm": 0.019065450876951218,
"learning_rate": 0.0026653094462540717,
"loss": 2.5633,
"step": 274
},
{
"epoch": 0.6043956043956044,
"grad_norm": 0.07239595055580139,
"learning_rate": 0.0026628664495114006,
"loss": 3.3265,
"step": 275
},
{
"epoch": 0.6065934065934065,
"grad_norm": 0.016410227864980698,
"learning_rate": 0.0026604234527687295,
"loss": 2.5218,
"step": 276
},
{
"epoch": 0.6087912087912087,
"grad_norm": 0.019121574237942696,
"learning_rate": 0.0026579804560260584,
"loss": 2.4198,
"step": 277
},
{
"epoch": 0.610989010989011,
"grad_norm": 0.016775179654359818,
"learning_rate": 0.0026555374592833877,
"loss": 2.2227,
"step": 278
},
{
"epoch": 0.6131868131868132,
"grad_norm": 0.025049753487110138,
"learning_rate": 0.0026530944625407166,
"loss": 3.0566,
"step": 279
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.012837176211178303,
"learning_rate": 0.0026506514657980455,
"loss": 2.6512,
"step": 280
},
{
"epoch": 0.6175824175824176,
"grad_norm": 0.017170587554574013,
"learning_rate": 0.0026482084690553744,
"loss": 2.1988,
"step": 281
},
{
"epoch": 0.6197802197802198,
"grad_norm": 0.0168058592826128,
"learning_rate": 0.0026457654723127033,
"loss": 1.7384,
"step": 282
},
{
"epoch": 0.621978021978022,
"grad_norm": 0.11843164265155792,
"learning_rate": 0.0026433224755700326,
"loss": 3.1681,
"step": 283
},
{
"epoch": 0.6241758241758242,
"grad_norm": 0.011939460411667824,
"learning_rate": 0.0026408794788273615,
"loss": 2.5616,
"step": 284
},
{
"epoch": 0.6263736263736264,
"grad_norm": 0.031566470861434937,
"learning_rate": 0.0026384364820846904,
"loss": 2.5451,
"step": 285
},
{
"epoch": 0.6285714285714286,
"grad_norm": 0.1022961363196373,
"learning_rate": 0.0026359934853420193,
"loss": 3.5236,
"step": 286
},
{
"epoch": 0.6307692307692307,
"grad_norm": 0.014717593789100647,
"learning_rate": 0.0026335504885993486,
"loss": 3.1229,
"step": 287
},
{
"epoch": 0.6329670329670329,
"grad_norm": 0.01979239098727703,
"learning_rate": 0.0026311074918566775,
"loss": 2.3823,
"step": 288
},
{
"epoch": 0.6351648351648351,
"grad_norm": 0.045468974858522415,
"learning_rate": 0.0026286644951140064,
"loss": 2.9849,
"step": 289
},
{
"epoch": 0.6373626373626373,
"grad_norm": 0.019398022443056107,
"learning_rate": 0.0026262214983713353,
"loss": 3.7119,
"step": 290
},
{
"epoch": 0.6395604395604395,
"grad_norm": 0.02326684258878231,
"learning_rate": 0.0026237785016286646,
"loss": 3.276,
"step": 291
},
{
"epoch": 0.6417582417582418,
"grad_norm": 0.023854602128267288,
"learning_rate": 0.0026213355048859935,
"loss": 2.6155,
"step": 292
},
{
"epoch": 0.643956043956044,
"grad_norm": 0.013441305607557297,
"learning_rate": 0.002618892508143323,
"loss": 1.8136,
"step": 293
},
{
"epoch": 0.6461538461538462,
"grad_norm": 0.021931316703557968,
"learning_rate": 0.0026164495114006517,
"loss": 2.2994,
"step": 294
},
{
"epoch": 0.6483516483516484,
"grad_norm": 0.01898212730884552,
"learning_rate": 0.0026140065146579806,
"loss": 2.1773,
"step": 295
},
{
"epoch": 0.6505494505494506,
"grad_norm": 0.021936513483524323,
"learning_rate": 0.0026115635179153095,
"loss": 2.3289,
"step": 296
},
{
"epoch": 0.6527472527472528,
"grad_norm": 0.05151190608739853,
"learning_rate": 0.002609120521172639,
"loss": 2.6804,
"step": 297
},
{
"epoch": 0.654945054945055,
"grad_norm": 0.020581930875778198,
"learning_rate": 0.0026066775244299677,
"loss": 2.8738,
"step": 298
},
{
"epoch": 0.6571428571428571,
"grad_norm": 0.04589063674211502,
"learning_rate": 0.0026042345276872966,
"loss": 2.9824,
"step": 299
},
{
"epoch": 0.6593406593406593,
"grad_norm": 0.0197887122631073,
"learning_rate": 0.0026017915309446255,
"loss": 2.7632,
"step": 300
},
{
"epoch": 0.6615384615384615,
"grad_norm": 0.012880639173090458,
"learning_rate": 0.0025993485342019544,
"loss": 2.2645,
"step": 301
},
{
"epoch": 0.6637362637362637,
"grad_norm": 0.021973341703414917,
"learning_rate": 0.0025969055374592837,
"loss": 2.7172,
"step": 302
},
{
"epoch": 0.6659340659340659,
"grad_norm": 0.02554718218743801,
"learning_rate": 0.0025944625407166126,
"loss": 2.4676,
"step": 303
},
{
"epoch": 0.6681318681318681,
"grad_norm": 0.018171951174736023,
"learning_rate": 0.0025920195439739415,
"loss": 1.95,
"step": 304
},
{
"epoch": 0.6703296703296703,
"grad_norm": 0.06418383121490479,
"learning_rate": 0.0025895765472312704,
"loss": 3.1376,
"step": 305
},
{
"epoch": 0.6725274725274726,
"grad_norm": 0.019107403233647346,
"learning_rate": 0.0025871335504885997,
"loss": 2.8239,
"step": 306
},
{
"epoch": 0.6747252747252748,
"grad_norm": 0.01850311830639839,
"learning_rate": 0.0025846905537459286,
"loss": 3.0085,
"step": 307
},
{
"epoch": 0.676923076923077,
"grad_norm": 0.04133279249072075,
"learning_rate": 0.0025822475570032575,
"loss": 2.7139,
"step": 308
},
{
"epoch": 0.6791208791208792,
"grad_norm": 0.014630049467086792,
"learning_rate": 0.0025798045602605864,
"loss": 2.6816,
"step": 309
},
{
"epoch": 0.6813186813186813,
"grad_norm": 0.01793878898024559,
"learning_rate": 0.0025773615635179153,
"loss": 2.5418,
"step": 310
},
{
"epoch": 0.6835164835164835,
"grad_norm": 0.8063828349113464,
"learning_rate": 0.0025749185667752446,
"loss": 2.5688,
"step": 311
},
{
"epoch": 0.6857142857142857,
"grad_norm": 0.2959175407886505,
"learning_rate": 0.0025724755700325735,
"loss": 2.9266,
"step": 312
},
{
"epoch": 0.6879120879120879,
"grad_norm": 0.027081279084086418,
"learning_rate": 0.0025700325732899024,
"loss": 2.6651,
"step": 313
},
{
"epoch": 0.6901098901098901,
"grad_norm": 0.11023405194282532,
"learning_rate": 0.0025675895765472313,
"loss": 2.6369,
"step": 314
},
{
"epoch": 0.6923076923076923,
"grad_norm": 0.4252738356590271,
"learning_rate": 0.0025651465798045606,
"loss": 2.6547,
"step": 315
},
{
"epoch": 0.6945054945054945,
"grad_norm": 0.01679593324661255,
"learning_rate": 0.0025627035830618895,
"loss": 3.6429,
"step": 316
},
{
"epoch": 0.6967032967032967,
"grad_norm": 0.034296032041311264,
"learning_rate": 0.0025602605863192184,
"loss": 2.7118,
"step": 317
},
{
"epoch": 0.6989010989010989,
"grad_norm": 0.010354156605899334,
"learning_rate": 0.0025578175895765473,
"loss": 2.3487,
"step": 318
},
{
"epoch": 0.701098901098901,
"grad_norm": 0.027670569717884064,
"learning_rate": 0.0025553745928338766,
"loss": 3.1567,
"step": 319
},
{
"epoch": 0.7032967032967034,
"grad_norm": 0.06830604374408722,
"learning_rate": 0.0025529315960912055,
"loss": 2.1537,
"step": 320
},
{
"epoch": 0.7054945054945055,
"grad_norm": 0.025308869779109955,
"learning_rate": 0.0025504885993485344,
"loss": 3.325,
"step": 321
},
{
"epoch": 0.7076923076923077,
"grad_norm": 0.05852383375167847,
"learning_rate": 0.0025480456026058633,
"loss": 2.4071,
"step": 322
},
{
"epoch": 0.7098901098901099,
"grad_norm": 0.054120082408189774,
"learning_rate": 0.002545602605863192,
"loss": 2.3863,
"step": 323
},
{
"epoch": 0.7120879120879121,
"grad_norm": 0.010777938179671764,
"learning_rate": 0.0025431596091205215,
"loss": 2.0294,
"step": 324
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.045072928071022034,
"learning_rate": 0.0025407166123778504,
"loss": 3.0899,
"step": 325
},
{
"epoch": 0.7164835164835165,
"grad_norm": 0.015996824949979782,
"learning_rate": 0.0025382736156351793,
"loss": 2.2157,
"step": 326
},
{
"epoch": 0.7186813186813187,
"grad_norm": 0.03813471272587776,
"learning_rate": 0.002535830618892508,
"loss": 2.6407,
"step": 327
},
{
"epoch": 0.7208791208791209,
"grad_norm": 0.01721225492656231,
"learning_rate": 0.0025333876221498375,
"loss": 2.5888,
"step": 328
},
{
"epoch": 0.7230769230769231,
"grad_norm": 0.016883473843336105,
"learning_rate": 0.0025309446254071664,
"loss": 3.1479,
"step": 329
},
{
"epoch": 0.7252747252747253,
"grad_norm": 0.021014297381043434,
"learning_rate": 0.0025285016286644953,
"loss": 2.9271,
"step": 330
},
{
"epoch": 0.7274725274725274,
"grad_norm": 0.048991672694683075,
"learning_rate": 0.002526058631921824,
"loss": 1.9598,
"step": 331
},
{
"epoch": 0.7296703296703296,
"grad_norm": 0.05304265767335892,
"learning_rate": 0.002523615635179153,
"loss": 2.6917,
"step": 332
},
{
"epoch": 0.7318681318681318,
"grad_norm": 0.03584013879299164,
"learning_rate": 0.0025211726384364824,
"loss": 1.9173,
"step": 333
},
{
"epoch": 0.734065934065934,
"grad_norm": 0.04524427279829979,
"learning_rate": 0.0025187296416938113,
"loss": 3.075,
"step": 334
},
{
"epoch": 0.7362637362637363,
"grad_norm": 0.017001923173666,
"learning_rate": 0.00251628664495114,
"loss": 2.9166,
"step": 335
},
{
"epoch": 0.7384615384615385,
"grad_norm": 0.014898216351866722,
"learning_rate": 0.002513843648208469,
"loss": 2.9831,
"step": 336
},
{
"epoch": 0.7406593406593407,
"grad_norm": 0.017777390778064728,
"learning_rate": 0.0025114006514657984,
"loss": 3.2579,
"step": 337
},
{
"epoch": 0.7428571428571429,
"grad_norm": 0.015360230579972267,
"learning_rate": 0.0025089576547231273,
"loss": 2.4163,
"step": 338
},
{
"epoch": 0.7450549450549451,
"grad_norm": 0.11719657480716705,
"learning_rate": 0.002506514657980456,
"loss": 2.5226,
"step": 339
},
{
"epoch": 0.7472527472527473,
"grad_norm": 0.07239314168691635,
"learning_rate": 0.002504071661237785,
"loss": 2.4277,
"step": 340
},
{
"epoch": 0.7494505494505495,
"grad_norm": 0.013682070188224316,
"learning_rate": 0.002501628664495114,
"loss": 3.6343,
"step": 341
},
{
"epoch": 0.7516483516483516,
"grad_norm": 0.016054898500442505,
"learning_rate": 0.0024991856677524433,
"loss": 2.2591,
"step": 342
},
{
"epoch": 0.7538461538461538,
"grad_norm": 0.018129508942365646,
"learning_rate": 0.002496742671009772,
"loss": 2.9612,
"step": 343
},
{
"epoch": 0.756043956043956,
"grad_norm": 0.01597229205071926,
"learning_rate": 0.002494299674267101,
"loss": 3.0267,
"step": 344
},
{
"epoch": 0.7582417582417582,
"grad_norm": 0.019353987649083138,
"learning_rate": 0.00249185667752443,
"loss": 1.9071,
"step": 345
},
{
"epoch": 0.7604395604395604,
"grad_norm": 0.012818114832043648,
"learning_rate": 0.0024894136807817593,
"loss": 2.9222,
"step": 346
},
{
"epoch": 0.7626373626373626,
"grad_norm": 0.022941293194890022,
"learning_rate": 0.002486970684039088,
"loss": 2.642,
"step": 347
},
{
"epoch": 0.7648351648351648,
"grad_norm": 0.012771750800311565,
"learning_rate": 0.002484527687296417,
"loss": 2.2237,
"step": 348
},
{
"epoch": 0.7670329670329671,
"grad_norm": 0.018484385684132576,
"learning_rate": 0.002482084690553746,
"loss": 2.14,
"step": 349
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.02337447553873062,
"learning_rate": 0.002479641693811075,
"loss": 2.5202,
"step": 350
},
{
"epoch": 0.7714285714285715,
"grad_norm": 0.08466807752847672,
"learning_rate": 0.002477198697068404,
"loss": 3.1289,
"step": 351
},
{
"epoch": 0.7736263736263737,
"grad_norm": 0.04180469363927841,
"learning_rate": 0.002474755700325733,
"loss": 2.7724,
"step": 352
},
{
"epoch": 0.7758241758241758,
"grad_norm": 0.015058220364153385,
"learning_rate": 0.002472312703583062,
"loss": 3.1973,
"step": 353
},
{
"epoch": 0.778021978021978,
"grad_norm": 0.1619366705417633,
"learning_rate": 0.002469869706840391,
"loss": 2.9995,
"step": 354
},
{
"epoch": 0.7802197802197802,
"grad_norm": 0.7699611783027649,
"learning_rate": 0.00246742671009772,
"loss": 2.3231,
"step": 355
},
{
"epoch": 0.7824175824175824,
"grad_norm": 0.013024060055613518,
"learning_rate": 0.002464983713355049,
"loss": 2.0517,
"step": 356
},
{
"epoch": 0.7846153846153846,
"grad_norm": 0.034511227160692215,
"learning_rate": 0.002462540716612378,
"loss": 2.6967,
"step": 357
},
{
"epoch": 0.7868131868131868,
"grad_norm": 0.24867047369480133,
"learning_rate": 0.002460097719869707,
"loss": 2.89,
"step": 358
},
{
"epoch": 0.789010989010989,
"grad_norm": 0.042237743735313416,
"learning_rate": 0.0024576547231270358,
"loss": 3.3183,
"step": 359
},
{
"epoch": 0.7912087912087912,
"grad_norm": 0.01658768020570278,
"learning_rate": 0.002455211726384365,
"loss": 2.5096,
"step": 360
},
{
"epoch": 0.7934065934065934,
"grad_norm": 0.06253362447023392,
"learning_rate": 0.002452768729641694,
"loss": 2.5071,
"step": 361
},
{
"epoch": 0.7956043956043956,
"grad_norm": 0.024494202807545662,
"learning_rate": 0.002450325732899023,
"loss": 2.6475,
"step": 362
},
{
"epoch": 0.7978021978021979,
"grad_norm": 0.01528454851359129,
"learning_rate": 0.0024478827361563518,
"loss": 2.73,
"step": 363
},
{
"epoch": 0.8,
"grad_norm": 0.06198682636022568,
"learning_rate": 0.002445439739413681,
"loss": 1.9405,
"step": 364
},
{
"epoch": 0.8021978021978022,
"grad_norm": 0.0702700987458229,
"learning_rate": 0.00244299674267101,
"loss": 3.565,
"step": 365
},
{
"epoch": 0.8043956043956044,
"grad_norm": 0.14449217915534973,
"learning_rate": 0.002440553745928339,
"loss": 2.7824,
"step": 366
},
{
"epoch": 0.8065934065934066,
"grad_norm": 0.03849424794316292,
"learning_rate": 0.0024381107491856678,
"loss": 2.9567,
"step": 367
},
{
"epoch": 0.8087912087912088,
"grad_norm": 0.08752796053886414,
"learning_rate": 0.0024356677524429967,
"loss": 2.6013,
"step": 368
},
{
"epoch": 0.810989010989011,
"grad_norm": 0.10728296637535095,
"learning_rate": 0.002433224755700326,
"loss": 2.5205,
"step": 369
},
{
"epoch": 0.8131868131868132,
"grad_norm": 0.01369336899369955,
"learning_rate": 0.002430781758957655,
"loss": 2.53,
"step": 370
},
{
"epoch": 0.8153846153846154,
"grad_norm": 0.2453184276819229,
"learning_rate": 0.0024283387622149838,
"loss": 2.479,
"step": 371
},
{
"epoch": 0.8175824175824176,
"grad_norm": 0.21739144623279572,
"learning_rate": 0.0024258957654723127,
"loss": 2.6806,
"step": 372
},
{
"epoch": 0.8197802197802198,
"grad_norm": 0.024779394268989563,
"learning_rate": 0.002423452768729642,
"loss": 2.2978,
"step": 373
},
{
"epoch": 0.8219780219780219,
"grad_norm": 0.02278847061097622,
"learning_rate": 0.002421009771986971,
"loss": 2.0834,
"step": 374
},
{
"epoch": 0.8241758241758241,
"grad_norm": 0.5629492402076721,
"learning_rate": 0.0024185667752442998,
"loss": 2.971,
"step": 375
},
{
"epoch": 0.8263736263736263,
"grad_norm": 0.013995617628097534,
"learning_rate": 0.0024161237785016287,
"loss": 2.703,
"step": 376
},
{
"epoch": 0.8285714285714286,
"grad_norm": 0.015931056812405586,
"learning_rate": 0.0024136807817589576,
"loss": 2.561,
"step": 377
},
{
"epoch": 0.8307692307692308,
"grad_norm": 0.05244484916329384,
"learning_rate": 0.002411237785016287,
"loss": 2.2662,
"step": 378
},
{
"epoch": 0.832967032967033,
"grad_norm": 0.021164577454328537,
"learning_rate": 0.0024087947882736158,
"loss": 2.3738,
"step": 379
},
{
"epoch": 0.8351648351648352,
"grad_norm": 0.020344359800219536,
"learning_rate": 0.0024063517915309447,
"loss": 3.031,
"step": 380
},
{
"epoch": 0.8373626373626374,
"grad_norm": 0.04574725031852722,
"learning_rate": 0.0024039087947882736,
"loss": 3.4111,
"step": 381
},
{
"epoch": 0.8395604395604396,
"grad_norm": 0.0199294276535511,
"learning_rate": 0.002401465798045603,
"loss": 1.8287,
"step": 382
},
{
"epoch": 0.8417582417582418,
"grad_norm": 0.026166317984461784,
"learning_rate": 0.0023990228013029318,
"loss": 2.1001,
"step": 383
},
{
"epoch": 0.843956043956044,
"grad_norm": 0.01917915605008602,
"learning_rate": 0.0023965798045602607,
"loss": 2.7754,
"step": 384
},
{
"epoch": 0.8461538461538461,
"grad_norm": 0.010772647336125374,
"learning_rate": 0.0023941368078175896,
"loss": 2.3155,
"step": 385
},
{
"epoch": 0.8483516483516483,
"grad_norm": 0.08823186904191971,
"learning_rate": 0.002391693811074919,
"loss": 2.9119,
"step": 386
},
{
"epoch": 0.8505494505494505,
"grad_norm": 0.012721859849989414,
"learning_rate": 0.0023892508143322478,
"loss": 3.34,
"step": 387
},
{
"epoch": 0.8527472527472527,
"grad_norm": 0.010566303506493568,
"learning_rate": 0.0023868078175895767,
"loss": 2.3389,
"step": 388
},
{
"epoch": 0.8549450549450549,
"grad_norm": 0.014985915273427963,
"learning_rate": 0.0023843648208469056,
"loss": 1.9324,
"step": 389
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.02516806498169899,
"learning_rate": 0.0023819218241042345,
"loss": 3.5201,
"step": 390
},
{
"epoch": 0.8593406593406593,
"grad_norm": 0.024456681683659554,
"learning_rate": 0.002379478827361564,
"loss": 2.5535,
"step": 391
},
{
"epoch": 0.8615384615384616,
"grad_norm": 0.02058997191488743,
"learning_rate": 0.0023770358306188927,
"loss": 2.1981,
"step": 392
},
{
"epoch": 0.8637362637362638,
"grad_norm": 0.024040214717388153,
"learning_rate": 0.0023745928338762216,
"loss": 3.0296,
"step": 393
},
{
"epoch": 0.865934065934066,
"grad_norm": 0.017731541767716408,
"learning_rate": 0.0023721498371335505,
"loss": 2.7152,
"step": 394
},
{
"epoch": 0.8681318681318682,
"grad_norm": 0.018150944262742996,
"learning_rate": 0.00236970684039088,
"loss": 2.6416,
"step": 395
},
{
"epoch": 0.8703296703296703,
"grad_norm": 0.016256919130682945,
"learning_rate": 0.0023672638436482087,
"loss": 2.8548,
"step": 396
},
{
"epoch": 0.8725274725274725,
"grad_norm": 0.13369649648666382,
"learning_rate": 0.0023648208469055376,
"loss": 2.2175,
"step": 397
},
{
"epoch": 0.8747252747252747,
"grad_norm": 0.08032003045082092,
"learning_rate": 0.0023623778501628665,
"loss": 3.0807,
"step": 398
},
{
"epoch": 0.8769230769230769,
"grad_norm": 0.019864169880747795,
"learning_rate": 0.0023599348534201954,
"loss": 3.3797,
"step": 399
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.014630906283855438,
"learning_rate": 0.0023574918566775247,
"loss": 2.6934,
"step": 400
},
{
"epoch": 0.8813186813186813,
"grad_norm": 0.010648279450833797,
"learning_rate": 0.0023550488599348536,
"loss": 3.5845,
"step": 401
},
{
"epoch": 0.8835164835164835,
"grad_norm": 0.026615019887685776,
"learning_rate": 0.0023526058631921825,
"loss": 2.7602,
"step": 402
},
{
"epoch": 0.8857142857142857,
"grad_norm": 0.035187337547540665,
"learning_rate": 0.0023501628664495114,
"loss": 2.4363,
"step": 403
},
{
"epoch": 0.8879120879120879,
"grad_norm": 0.013904489576816559,
"learning_rate": 0.0023477198697068407,
"loss": 2.255,
"step": 404
},
{
"epoch": 0.8901098901098901,
"grad_norm": 0.021835554391145706,
"learning_rate": 0.0023452768729641696,
"loss": 2.6301,
"step": 405
},
{
"epoch": 0.8923076923076924,
"grad_norm": 0.33056357502937317,
"learning_rate": 0.0023428338762214985,
"loss": 2.1717,
"step": 406
},
{
"epoch": 0.8945054945054945,
"grad_norm": 0.017140474170446396,
"learning_rate": 0.0023403908794788274,
"loss": 2.6898,
"step": 407
},
{
"epoch": 0.8967032967032967,
"grad_norm": 0.05310331657528877,
"learning_rate": 0.0023379478827361562,
"loss": 2.2073,
"step": 408
},
{
"epoch": 0.8989010989010989,
"grad_norm": 0.02211860567331314,
"learning_rate": 0.0023355048859934856,
"loss": 3.1547,
"step": 409
},
{
"epoch": 0.9010989010989011,
"grad_norm": 0.024500638246536255,
"learning_rate": 0.0023330618892508145,
"loss": 2.2487,
"step": 410
},
{
"epoch": 0.9032967032967033,
"grad_norm": 0.011415651068091393,
"learning_rate": 0.0023306188925081434,
"loss": 2.4258,
"step": 411
},
{
"epoch": 0.9054945054945055,
"grad_norm": 0.07653189450502396,
"learning_rate": 0.0023281758957654723,
"loss": 2.7593,
"step": 412
},
{
"epoch": 0.9076923076923077,
"grad_norm": 0.019408097490668297,
"learning_rate": 0.0023257328990228016,
"loss": 2.5112,
"step": 413
},
{
"epoch": 0.9098901098901099,
"grad_norm": 0.09565436094999313,
"learning_rate": 0.0023232899022801305,
"loss": 2.577,
"step": 414
},
{
"epoch": 0.9120879120879121,
"grad_norm": 0.025640321895480156,
"learning_rate": 0.0023208469055374594,
"loss": 2.6943,
"step": 415
},
{
"epoch": 0.9142857142857143,
"grad_norm": 0.02181849256157875,
"learning_rate": 0.0023184039087947883,
"loss": 3.8044,
"step": 416
},
{
"epoch": 0.9164835164835164,
"grad_norm": 0.012063076719641685,
"learning_rate": 0.002315960912052117,
"loss": 2.5952,
"step": 417
},
{
"epoch": 0.9186813186813186,
"grad_norm": 0.03450430929660797,
"learning_rate": 0.0023135179153094465,
"loss": 2.7774,
"step": 418
},
{
"epoch": 0.9208791208791208,
"grad_norm": 0.45223432779312134,
"learning_rate": 0.0023110749185667754,
"loss": 2.4447,
"step": 419
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.025702567771077156,
"learning_rate": 0.0023086319218241043,
"loss": 2.3553,
"step": 420
},
{
"epoch": 0.9252747252747253,
"grad_norm": 0.017728371545672417,
"learning_rate": 0.002306188925081433,
"loss": 2.2885,
"step": 421
},
{
"epoch": 0.9274725274725275,
"grad_norm": 0.021141264587640762,
"learning_rate": 0.0023037459283387625,
"loss": 2.5135,
"step": 422
},
{
"epoch": 0.9296703296703297,
"grad_norm": 1.4496071338653564,
"learning_rate": 0.0023013029315960914,
"loss": 2.5849,
"step": 423
},
{
"epoch": 0.9318681318681319,
"grad_norm": 2.16518497467041,
"learning_rate": 0.0022988599348534203,
"loss": 2.426,
"step": 424
},
{
"epoch": 0.9340659340659341,
"grad_norm": 0.020598256960511208,
"learning_rate": 0.002296416938110749,
"loss": 2.0187,
"step": 425
},
{
"epoch": 0.9362637362637363,
"grad_norm": 0.0277436301112175,
"learning_rate": 0.002293973941368078,
"loss": 3.0879,
"step": 426
},
{
"epoch": 0.9384615384615385,
"grad_norm": 0.15797604620456696,
"learning_rate": 0.0022915309446254074,
"loss": 2.1565,
"step": 427
},
{
"epoch": 0.9406593406593406,
"grad_norm": 0.05432300269603729,
"learning_rate": 0.0022890879478827363,
"loss": 2.311,
"step": 428
},
{
"epoch": 0.9428571428571428,
"grad_norm": 0.026955392211675644,
"learning_rate": 0.002286644951140065,
"loss": 2.7326,
"step": 429
},
{
"epoch": 0.945054945054945,
"grad_norm": 0.016387581825256348,
"learning_rate": 0.002284201954397394,
"loss": 2.7158,
"step": 430
},
{
"epoch": 0.9472527472527472,
"grad_norm": 0.02315669134259224,
"learning_rate": 0.0022817589576547234,
"loss": 2.4051,
"step": 431
},
{
"epoch": 0.9494505494505494,
"grad_norm": 0.2296651154756546,
"learning_rate": 0.0022793159609120523,
"loss": 2.5375,
"step": 432
},
{
"epoch": 0.9516483516483516,
"grad_norm": 0.013821404427289963,
"learning_rate": 0.002276872964169381,
"loss": 1.9929,
"step": 433
},
{
"epoch": 0.9538461538461539,
"grad_norm": 0.28112685680389404,
"learning_rate": 0.00227442996742671,
"loss": 2.8213,
"step": 434
},
{
"epoch": 0.9560439560439561,
"grad_norm": 0.01522796880453825,
"learning_rate": 0.002271986970684039,
"loss": 2.7129,
"step": 435
},
{
"epoch": 0.9582417582417583,
"grad_norm": 0.026348400861024857,
"learning_rate": 0.0022695439739413683,
"loss": 2.4858,
"step": 436
},
{
"epoch": 0.9604395604395605,
"grad_norm": 0.03594924509525299,
"learning_rate": 0.002267100977198697,
"loss": 2.7458,
"step": 437
},
{
"epoch": 0.9626373626373627,
"grad_norm": 0.07751062512397766,
"learning_rate": 0.002264657980456026,
"loss": 2.2806,
"step": 438
},
{
"epoch": 0.9648351648351648,
"grad_norm": 0.1993318647146225,
"learning_rate": 0.002262214983713355,
"loss": 3.1702,
"step": 439
},
{
"epoch": 0.967032967032967,
"grad_norm": 0.07368061691522598,
"learning_rate": 0.0022597719869706843,
"loss": 2.5597,
"step": 440
},
{
"epoch": 0.9692307692307692,
"grad_norm": 0.026725711300969124,
"learning_rate": 0.002257328990228013,
"loss": 2.9086,
"step": 441
},
{
"epoch": 0.9714285714285714,
"grad_norm": 0.017008459195494652,
"learning_rate": 0.002254885993485342,
"loss": 1.9729,
"step": 442
},
{
"epoch": 0.9736263736263736,
"grad_norm": 0.04938315972685814,
"learning_rate": 0.002252442996742671,
"loss": 2.1711,
"step": 443
},
{
"epoch": 0.9758241758241758,
"grad_norm": 0.022152820602059364,
"learning_rate": 0.0022500000000000003,
"loss": 2.8618,
"step": 444
},
{
"epoch": 0.978021978021978,
"grad_norm": 0.026729391887784004,
"learning_rate": 0.002247557003257329,
"loss": 2.309,
"step": 445
},
{
"epoch": 0.9802197802197802,
"grad_norm": 0.029468074440956116,
"learning_rate": 0.002245114006514658,
"loss": 2.2182,
"step": 446
},
{
"epoch": 0.9824175824175824,
"grad_norm": 0.3300466537475586,
"learning_rate": 0.002242671009771987,
"loss": 2.3919,
"step": 447
},
{
"epoch": 0.9846153846153847,
"grad_norm": 0.02179468609392643,
"learning_rate": 0.002240228013029316,
"loss": 2.7356,
"step": 448
},
{
"epoch": 0.9868131868131869,
"grad_norm": 0.04703761264681816,
"learning_rate": 0.002237785016286645,
"loss": 2.9259,
"step": 449
},
{
"epoch": 0.989010989010989,
"grad_norm": 0.05667249113321304,
"learning_rate": 0.002235342019543974,
"loss": 2.0488,
"step": 450
},
{
"epoch": 0.9912087912087912,
"grad_norm": 0.1434217095375061,
"learning_rate": 0.002232899022801303,
"loss": 2.9943,
"step": 451
},
{
"epoch": 0.9934065934065934,
"grad_norm": 0.39706510305404663,
"learning_rate": 0.002230456026058632,
"loss": 2.4752,
"step": 452
},
{
"epoch": 0.9956043956043956,
"grad_norm": 0.07620025426149368,
"learning_rate": 0.002228013029315961,
"loss": 1.9744,
"step": 453
},
{
"epoch": 0.9978021978021978,
"grad_norm": 0.028210952877998352,
"learning_rate": 0.00222557003257329,
"loss": 2.1446,
"step": 454
},
{
"epoch": 1.0,
"grad_norm": 0.09033296257257462,
"learning_rate": 0.002223127035830619,
"loss": 2.1521,
"step": 455
},
{
"epoch": 1.0021978021978022,
"grad_norm": 0.10748340934515,
"learning_rate": 0.002220684039087948,
"loss": 2.8333,
"step": 456
},
{
"epoch": 1.0043956043956044,
"grad_norm": 0.24330149590969086,
"learning_rate": 0.0022182410423452767,
"loss": 2.7546,
"step": 457
},
{
"epoch": 1.0065934065934066,
"grad_norm": 0.03868294134736061,
"learning_rate": 0.002215798045602606,
"loss": 2.5181,
"step": 458
},
{
"epoch": 1.0087912087912088,
"grad_norm": 0.030569883063435555,
"learning_rate": 0.002213355048859935,
"loss": 2.4631,
"step": 459
},
{
"epoch": 1.010989010989011,
"grad_norm": 0.024000326171517372,
"learning_rate": 0.002210912052117264,
"loss": 2.9215,
"step": 460
},
{
"epoch": 1.0131868131868131,
"grad_norm": 0.025105871260166168,
"learning_rate": 0.0022084690553745927,
"loss": 2.1111,
"step": 461
},
{
"epoch": 1.0153846153846153,
"grad_norm": 0.030882153660058975,
"learning_rate": 0.002206026058631922,
"loss": 2.9825,
"step": 462
},
{
"epoch": 1.0175824175824175,
"grad_norm": 0.5997393131256104,
"learning_rate": 0.002203583061889251,
"loss": 2.6266,
"step": 463
},
{
"epoch": 1.0197802197802197,
"grad_norm": 0.035910315811634064,
"learning_rate": 0.00220114006514658,
"loss": 2.115,
"step": 464
},
{
"epoch": 1.021978021978022,
"grad_norm": 0.02183556742966175,
"learning_rate": 0.0021986970684039087,
"loss": 2.6819,
"step": 465
},
{
"epoch": 1.024175824175824,
"grad_norm": 0.013235201127827168,
"learning_rate": 0.0021962540716612376,
"loss": 2.6984,
"step": 466
},
{
"epoch": 1.0263736263736263,
"grad_norm": 0.023340629413723946,
"learning_rate": 0.002193811074918567,
"loss": 2.4582,
"step": 467
},
{
"epoch": 1.0285714285714285,
"grad_norm": 0.0973559021949768,
"learning_rate": 0.002191368078175896,
"loss": 2.5177,
"step": 468
},
{
"epoch": 1.0307692307692307,
"grad_norm": 0.020193662494421005,
"learning_rate": 0.0021889250814332247,
"loss": 2.5217,
"step": 469
},
{
"epoch": 1.032967032967033,
"grad_norm": 0.0484769269824028,
"learning_rate": 0.0021864820846905536,
"loss": 2.6372,
"step": 470
},
{
"epoch": 1.0351648351648353,
"grad_norm": 0.06362493336200714,
"learning_rate": 0.002184039087947883,
"loss": 1.9837,
"step": 471
},
{
"epoch": 1.0373626373626375,
"grad_norm": 0.014899802394211292,
"learning_rate": 0.002181596091205212,
"loss": 2.6468,
"step": 472
},
{
"epoch": 1.0395604395604396,
"grad_norm": 0.053877368569374084,
"learning_rate": 0.0021791530944625407,
"loss": 2.2309,
"step": 473
},
{
"epoch": 1.0417582417582418,
"grad_norm": 0.6332066655158997,
"learning_rate": 0.0021767100977198696,
"loss": 2.4201,
"step": 474
},
{
"epoch": 1.043956043956044,
"grad_norm": 0.03319546580314636,
"learning_rate": 0.0021742671009771985,
"loss": 2.0905,
"step": 475
},
{
"epoch": 1.0461538461538462,
"grad_norm": 0.03222614899277687,
"learning_rate": 0.002171824104234528,
"loss": 2.6413,
"step": 476
},
{
"epoch": 1.0483516483516484,
"grad_norm": 0.01616428792476654,
"learning_rate": 0.0021693811074918567,
"loss": 1.9845,
"step": 477
},
{
"epoch": 1.0505494505494506,
"grad_norm": 0.017148522660136223,
"learning_rate": 0.0021669381107491856,
"loss": 1.9275,
"step": 478
},
{
"epoch": 1.0527472527472528,
"grad_norm": 0.023086179047822952,
"learning_rate": 0.0021644951140065145,
"loss": 2.1126,
"step": 479
},
{
"epoch": 1.054945054945055,
"grad_norm": 0.019238149747252464,
"learning_rate": 0.002162052117263844,
"loss": 2.4277,
"step": 480
},
{
"epoch": 1.0571428571428572,
"grad_norm": 0.021163631230592728,
"learning_rate": 0.0021596091205211727,
"loss": 1.8179,
"step": 481
},
{
"epoch": 1.0593406593406594,
"grad_norm": 0.04428611695766449,
"learning_rate": 0.0021571661237785016,
"loss": 1.9877,
"step": 482
},
{
"epoch": 1.0615384615384615,
"grad_norm": 0.018576402217149734,
"learning_rate": 0.0021547231270358305,
"loss": 2.2655,
"step": 483
},
{
"epoch": 1.0637362637362637,
"grad_norm": 0.34795787930488586,
"learning_rate": 0.0021522801302931594,
"loss": 1.99,
"step": 484
},
{
"epoch": 1.065934065934066,
"grad_norm": 0.02776804380118847,
"learning_rate": 0.0021498371335504887,
"loss": 2.2511,
"step": 485
},
{
"epoch": 1.0681318681318681,
"grad_norm": 0.02286771684885025,
"learning_rate": 0.0021473941368078176,
"loss": 2.1579,
"step": 486
},
{
"epoch": 1.0703296703296703,
"grad_norm": 0.014362718909978867,
"learning_rate": 0.0021449511400651465,
"loss": 1.9156,
"step": 487
},
{
"epoch": 1.0725274725274725,
"grad_norm": 0.03315817564725876,
"learning_rate": 0.0021425081433224754,
"loss": 2.1577,
"step": 488
},
{
"epoch": 1.0747252747252747,
"grad_norm": 0.03041631169617176,
"learning_rate": 0.0021400651465798047,
"loss": 2.5762,
"step": 489
},
{
"epoch": 1.0769230769230769,
"grad_norm": 0.018775612115859985,
"learning_rate": 0.0021376221498371336,
"loss": 2.2322,
"step": 490
},
{
"epoch": 1.079120879120879,
"grad_norm": 0.02440355345606804,
"learning_rate": 0.0021351791530944625,
"loss": 2.5691,
"step": 491
},
{
"epoch": 1.0813186813186813,
"grad_norm": 0.06933584809303284,
"learning_rate": 0.0021327361563517914,
"loss": 2.829,
"step": 492
},
{
"epoch": 1.0835164835164834,
"grad_norm": 0.6479172706604004,
"learning_rate": 0.0021302931596091203,
"loss": 3.1845,
"step": 493
},
{
"epoch": 1.0857142857142856,
"grad_norm": 0.02934328280389309,
"learning_rate": 0.0021278501628664496,
"loss": 1.903,
"step": 494
},
{
"epoch": 1.0879120879120878,
"grad_norm": 0.022499507293105125,
"learning_rate": 0.0021254071661237785,
"loss": 1.8677,
"step": 495
},
{
"epoch": 1.09010989010989,
"grad_norm": 0.03454659879207611,
"learning_rate": 0.0021229641693811074,
"loss": 2.7256,
"step": 496
},
{
"epoch": 1.0923076923076924,
"grad_norm": 0.026321521028876305,
"learning_rate": 0.0021205211726384363,
"loss": 2.1144,
"step": 497
},
{
"epoch": 1.0945054945054946,
"grad_norm": 0.05514807254076004,
"learning_rate": 0.0021180781758957656,
"loss": 1.9636,
"step": 498
},
{
"epoch": 1.0967032967032968,
"grad_norm": 0.030024802312254906,
"learning_rate": 0.0021156351791530945,
"loss": 2.4542,
"step": 499
},
{
"epoch": 1.098901098901099,
"grad_norm": 0.021501895040273666,
"learning_rate": 0.0021131921824104234,
"loss": 2.2248,
"step": 500
},
{
"epoch": 1.1010989010989012,
"grad_norm": 0.033270806074142456,
"learning_rate": 0.0021107491856677523,
"loss": 2.3699,
"step": 501
},
{
"epoch": 1.1032967032967034,
"grad_norm": 0.02079012617468834,
"learning_rate": 0.002108306188925081,
"loss": 2.5214,
"step": 502
},
{
"epoch": 1.1054945054945056,
"grad_norm": 0.01893988624215126,
"learning_rate": 0.0021058631921824105,
"loss": 2.295,
"step": 503
},
{
"epoch": 1.1076923076923078,
"grad_norm": 0.2213221937417984,
"learning_rate": 0.0021034201954397394,
"loss": 2.8297,
"step": 504
},
{
"epoch": 1.10989010989011,
"grad_norm": 0.018863068893551826,
"learning_rate": 0.0021009771986970683,
"loss": 2.073,
"step": 505
},
{
"epoch": 1.1120879120879121,
"grad_norm": 0.049462996423244476,
"learning_rate": 0.002098534201954397,
"loss": 3.2442,
"step": 506
},
{
"epoch": 1.1142857142857143,
"grad_norm": 0.022047117352485657,
"learning_rate": 0.0020960912052117265,
"loss": 2.1594,
"step": 507
},
{
"epoch": 1.1164835164835165,
"grad_norm": 0.03840857371687889,
"learning_rate": 0.0020936482084690554,
"loss": 2.5122,
"step": 508
},
{
"epoch": 1.1186813186813187,
"grad_norm": 0.0226457342505455,
"learning_rate": 0.0020912052117263843,
"loss": 2.0718,
"step": 509
},
{
"epoch": 1.120879120879121,
"grad_norm": 0.018072908744215965,
"learning_rate": 0.002088762214983713,
"loss": 2.2751,
"step": 510
},
{
"epoch": 1.123076923076923,
"grad_norm": 0.06161002814769745,
"learning_rate": 0.0020863192182410425,
"loss": 2.6368,
"step": 511
},
{
"epoch": 1.1252747252747253,
"grad_norm": 0.03674451634287834,
"learning_rate": 0.0020838762214983714,
"loss": 2.4399,
"step": 512
},
{
"epoch": 1.1274725274725275,
"grad_norm": 0.09910057485103607,
"learning_rate": 0.0020814332247557003,
"loss": 2.5625,
"step": 513
},
{
"epoch": 1.1296703296703297,
"grad_norm": 0.24920952320098877,
"learning_rate": 0.002078990228013029,
"loss": 2.6452,
"step": 514
},
{
"epoch": 1.1318681318681318,
"grad_norm": 0.04675315320491791,
"learning_rate": 0.002076547231270358,
"loss": 2.4228,
"step": 515
},
{
"epoch": 1.134065934065934,
"grad_norm": 0.02355196699500084,
"learning_rate": 0.0020741042345276874,
"loss": 2.1582,
"step": 516
},
{
"epoch": 1.1362637362637362,
"grad_norm": 0.02632654830813408,
"learning_rate": 0.0020716612377850163,
"loss": 1.8292,
"step": 517
},
{
"epoch": 1.1384615384615384,
"grad_norm": 0.021832846105098724,
"learning_rate": 0.002069218241042345,
"loss": 2.5528,
"step": 518
},
{
"epoch": 1.1406593406593406,
"grad_norm": 0.04003608599305153,
"learning_rate": 0.002066775244299674,
"loss": 2.056,
"step": 519
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.08521031588315964,
"learning_rate": 0.0020643322475570034,
"loss": 2.2177,
"step": 520
},
{
"epoch": 1.145054945054945,
"grad_norm": 0.04957525432109833,
"learning_rate": 0.0020618892508143323,
"loss": 2.336,
"step": 521
},
{
"epoch": 1.1472527472527472,
"grad_norm": 0.043508656322956085,
"learning_rate": 0.002059446254071661,
"loss": 2.7035,
"step": 522
},
{
"epoch": 1.1494505494505494,
"grad_norm": 2.461320638656616,
"learning_rate": 0.00205700325732899,
"loss": 2.122,
"step": 523
},
{
"epoch": 1.1516483516483516,
"grad_norm": 0.0612497478723526,
"learning_rate": 0.002054560260586319,
"loss": 2.1273,
"step": 524
},
{
"epoch": 1.1538461538461537,
"grad_norm": 0.01509169489145279,
"learning_rate": 0.0020521172638436483,
"loss": 2.6816,
"step": 525
},
{
"epoch": 1.156043956043956,
"grad_norm": 0.18717047572135925,
"learning_rate": 0.002049674267100977,
"loss": 2.2962,
"step": 526
},
{
"epoch": 1.1582417582417581,
"grad_norm": 0.02411770075559616,
"learning_rate": 0.002047231270358306,
"loss": 1.7035,
"step": 527
},
{
"epoch": 1.1604395604395605,
"grad_norm": 0.03743946552276611,
"learning_rate": 0.002044788273615635,
"loss": 1.9655,
"step": 528
},
{
"epoch": 1.1626373626373627,
"grad_norm": 0.04976235702633858,
"learning_rate": 0.0020423452768729643,
"loss": 2.622,
"step": 529
},
{
"epoch": 1.164835164835165,
"grad_norm": 0.017133789137005806,
"learning_rate": 0.002039902280130293,
"loss": 2.2293,
"step": 530
},
{
"epoch": 1.167032967032967,
"grad_norm": 0.016698624938726425,
"learning_rate": 0.002037459283387622,
"loss": 2.4342,
"step": 531
},
{
"epoch": 1.1692307692307693,
"grad_norm": 0.029416749253869057,
"learning_rate": 0.002035016286644951,
"loss": 2.374,
"step": 532
},
{
"epoch": 1.1714285714285715,
"grad_norm": 0.03704821318387985,
"learning_rate": 0.00203257328990228,
"loss": 2.5202,
"step": 533
},
{
"epoch": 1.1736263736263737,
"grad_norm": 0.032146990299224854,
"learning_rate": 0.0020301302931596092,
"loss": 2.6811,
"step": 534
},
{
"epoch": 1.1758241758241759,
"grad_norm": 0.023578539490699768,
"learning_rate": 0.002027687296416938,
"loss": 2.2361,
"step": 535
},
{
"epoch": 1.178021978021978,
"grad_norm": 1.8469176292419434,
"learning_rate": 0.002025244299674267,
"loss": 1.795,
"step": 536
},
{
"epoch": 1.1802197802197802,
"grad_norm": 0.016327382996678352,
"learning_rate": 0.002022801302931596,
"loss": 2.5551,
"step": 537
},
{
"epoch": 1.1824175824175824,
"grad_norm": 0.02482936531305313,
"learning_rate": 0.0020203583061889252,
"loss": 2.2942,
"step": 538
},
{
"epoch": 1.1846153846153846,
"grad_norm": 0.5670949220657349,
"learning_rate": 0.002017915309446254,
"loss": 2.8515,
"step": 539
},
{
"epoch": 1.1868131868131868,
"grad_norm": 0.02718065306544304,
"learning_rate": 0.002015472312703583,
"loss": 2.4402,
"step": 540
},
{
"epoch": 1.189010989010989,
"grad_norm": 1.7034631967544556,
"learning_rate": 0.002013029315960912,
"loss": 1.9189,
"step": 541
},
{
"epoch": 1.1912087912087912,
"grad_norm": 0.05805704742670059,
"learning_rate": 0.002010586319218241,
"loss": 2.2199,
"step": 542
},
{
"epoch": 1.1934065934065934,
"grad_norm": 0.028596457093954086,
"learning_rate": 0.00200814332247557,
"loss": 2.5426,
"step": 543
},
{
"epoch": 1.1956043956043956,
"grad_norm": 0.02020750567317009,
"learning_rate": 0.002005700325732899,
"loss": 2.4155,
"step": 544
},
{
"epoch": 1.1978021978021978,
"grad_norm": 0.02121913619339466,
"learning_rate": 0.002003257328990228,
"loss": 1.8035,
"step": 545
},
{
"epoch": 1.2,
"grad_norm": 0.039216309785842896,
"learning_rate": 0.002000814332247557,
"loss": 2.9912,
"step": 546
},
{
"epoch": 1.2021978021978021,
"grad_norm": 0.15490901470184326,
"learning_rate": 0.001998371335504886,
"loss": 2.3647,
"step": 547
},
{
"epoch": 1.2043956043956043,
"grad_norm": 0.03326239809393883,
"learning_rate": 0.001995928338762215,
"loss": 2.0177,
"step": 548
},
{
"epoch": 1.2065934065934065,
"grad_norm": 0.01878621056675911,
"learning_rate": 0.001993485342019544,
"loss": 2.7403,
"step": 549
},
{
"epoch": 1.2087912087912087,
"grad_norm": 0.1944480687379837,
"learning_rate": 0.001991042345276873,
"loss": 2.3443,
"step": 550
},
{
"epoch": 1.210989010989011,
"grad_norm": 0.037707362323999405,
"learning_rate": 0.0019885993485342017,
"loss": 2.1401,
"step": 551
},
{
"epoch": 1.213186813186813,
"grad_norm": 0.062297720462083817,
"learning_rate": 0.001986156351791531,
"loss": 2.3683,
"step": 552
},
{
"epoch": 1.2153846153846155,
"grad_norm": 0.034150995314121246,
"learning_rate": 0.00198371335504886,
"loss": 2.0556,
"step": 553
},
{
"epoch": 1.2175824175824177,
"grad_norm": 0.0256241112947464,
"learning_rate": 0.001981270358306189,
"loss": 2.0809,
"step": 554
},
{
"epoch": 1.2197802197802199,
"grad_norm": 0.05154910311102867,
"learning_rate": 0.0019788273615635177,
"loss": 2.1162,
"step": 555
},
{
"epoch": 1.221978021978022,
"grad_norm": 0.19502048194408417,
"learning_rate": 0.001976384364820847,
"loss": 2.5782,
"step": 556
},
{
"epoch": 1.2241758241758243,
"grad_norm": 0.019277194514870644,
"learning_rate": 0.001973941368078176,
"loss": 2.6205,
"step": 557
},
{
"epoch": 1.2263736263736265,
"grad_norm": 0.2771492898464203,
"learning_rate": 0.001971498371335505,
"loss": 2.3556,
"step": 558
},
{
"epoch": 1.2285714285714286,
"grad_norm": 0.020869240164756775,
"learning_rate": 0.0019690553745928337,
"loss": 2.2923,
"step": 559
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.13194453716278076,
"learning_rate": 0.0019666123778501626,
"loss": 2.1606,
"step": 560
},
{
"epoch": 1.232967032967033,
"grad_norm": 0.019498448818922043,
"learning_rate": 0.001964169381107492,
"loss": 2.6002,
"step": 561
},
{
"epoch": 1.2351648351648352,
"grad_norm": 0.06741607189178467,
"learning_rate": 0.001961726384364821,
"loss": 2.2092,
"step": 562
},
{
"epoch": 1.2373626373626374,
"grad_norm": 0.03738139569759369,
"learning_rate": 0.0019592833876221497,
"loss": 2.5044,
"step": 563
},
{
"epoch": 1.2395604395604396,
"grad_norm": 0.015364487655460835,
"learning_rate": 0.0019568403908794786,
"loss": 2.3343,
"step": 564
},
{
"epoch": 1.2417582417582418,
"grad_norm": 0.8967491984367371,
"learning_rate": 0.001954397394136808,
"loss": 2.3706,
"step": 565
},
{
"epoch": 1.243956043956044,
"grad_norm": 0.04372824355959892,
"learning_rate": 0.0019519543973941368,
"loss": 2.9172,
"step": 566
},
{
"epoch": 1.2461538461538462,
"grad_norm": 0.025654718279838562,
"learning_rate": 0.0019495114006514657,
"loss": 2.8418,
"step": 567
},
{
"epoch": 1.2483516483516484,
"grad_norm": 0.0864330530166626,
"learning_rate": 0.0019470684039087948,
"loss": 2.3192,
"step": 568
},
{
"epoch": 1.2505494505494505,
"grad_norm": 0.02142184227705002,
"learning_rate": 0.0019446254071661237,
"loss": 2.2217,
"step": 569
},
{
"epoch": 1.2527472527472527,
"grad_norm": 0.5022119879722595,
"learning_rate": 0.0019421824104234526,
"loss": 2.3908,
"step": 570
},
{
"epoch": 1.254945054945055,
"grad_norm": 0.047244299203157425,
"learning_rate": 0.0019397394136807817,
"loss": 2.0495,
"step": 571
},
{
"epoch": 1.2571428571428571,
"grad_norm": 0.41980835795402527,
"learning_rate": 0.0019372964169381106,
"loss": 2.4161,
"step": 572
},
{
"epoch": 1.2593406593406593,
"grad_norm": 0.04297642037272453,
"learning_rate": 0.0019348534201954397,
"loss": 2.4698,
"step": 573
},
{
"epoch": 1.2615384615384615,
"grad_norm": 0.1767284870147705,
"learning_rate": 0.0019324104234527686,
"loss": 2.16,
"step": 574
},
{
"epoch": 1.2637362637362637,
"grad_norm": 0.029439883306622505,
"learning_rate": 0.0019299674267100977,
"loss": 2.0272,
"step": 575
},
{
"epoch": 1.2659340659340659,
"grad_norm": 0.022207440808415413,
"learning_rate": 0.0019275244299674266,
"loss": 1.8675,
"step": 576
},
{
"epoch": 1.268131868131868,
"grad_norm": 0.02023591287434101,
"learning_rate": 0.0019250814332247557,
"loss": 2.0895,
"step": 577
},
{
"epoch": 1.2703296703296703,
"grad_norm": 0.0205642431974411,
"learning_rate": 0.0019226384364820846,
"loss": 1.7731,
"step": 578
},
{
"epoch": 1.2725274725274724,
"grad_norm": 0.022339126095175743,
"learning_rate": 0.0019201954397394135,
"loss": 1.9712,
"step": 579
},
{
"epoch": 1.2747252747252746,
"grad_norm": 0.019322700798511505,
"learning_rate": 0.0019177524429967426,
"loss": 2.1199,
"step": 580
},
{
"epoch": 1.2769230769230768,
"grad_norm": 0.03329294174909592,
"learning_rate": 0.0019153094462540715,
"loss": 2.3549,
"step": 581
},
{
"epoch": 1.279120879120879,
"grad_norm": 0.017545146867632866,
"learning_rate": 0.0019128664495114006,
"loss": 2.5347,
"step": 582
},
{
"epoch": 1.2813186813186812,
"grad_norm": 0.23170159757137299,
"learning_rate": 0.0019104234527687295,
"loss": 2.235,
"step": 583
},
{
"epoch": 1.2835164835164834,
"grad_norm": 0.025223638862371445,
"learning_rate": 0.0019079804560260586,
"loss": 2.1898,
"step": 584
},
{
"epoch": 1.2857142857142856,
"grad_norm": 0.018736187368631363,
"learning_rate": 0.0019055374592833875,
"loss": 2.9275,
"step": 585
},
{
"epoch": 1.2879120879120878,
"grad_norm": 0.20970503985881805,
"learning_rate": 0.0019030944625407166,
"loss": 2.8385,
"step": 586
},
{
"epoch": 1.2901098901098902,
"grad_norm": 0.4265841245651245,
"learning_rate": 0.0019006514657980455,
"loss": 2.487,
"step": 587
},
{
"epoch": 1.2923076923076924,
"grad_norm": 0.14403890073299408,
"learning_rate": 0.0018982084690553744,
"loss": 2.9317,
"step": 588
},
{
"epoch": 1.2945054945054946,
"grad_norm": 0.03382799029350281,
"learning_rate": 0.0018957654723127035,
"loss": 2.1853,
"step": 589
},
{
"epoch": 1.2967032967032968,
"grad_norm": 0.10009512305259705,
"learning_rate": 0.0018933224755700324,
"loss": 2.5285,
"step": 590
},
{
"epoch": 1.298901098901099,
"grad_norm": 0.017963645979762077,
"learning_rate": 0.0018908794788273615,
"loss": 2.241,
"step": 591
},
{
"epoch": 1.3010989010989011,
"grad_norm": 0.03325553238391876,
"learning_rate": 0.0018884364820846904,
"loss": 2.6337,
"step": 592
},
{
"epoch": 1.3032967032967033,
"grad_norm": 0.02059227228164673,
"learning_rate": 0.0018859934853420195,
"loss": 2.5608,
"step": 593
},
{
"epoch": 1.3054945054945055,
"grad_norm": 0.02778852917253971,
"learning_rate": 0.0018835504885993484,
"loss": 2.0495,
"step": 594
},
{
"epoch": 1.3076923076923077,
"grad_norm": 0.021134963259100914,
"learning_rate": 0.0018811074918566775,
"loss": 2.3389,
"step": 595
},
{
"epoch": 1.30989010989011,
"grad_norm": 0.019280841574072838,
"learning_rate": 0.0018786644951140064,
"loss": 1.682,
"step": 596
},
{
"epoch": 1.312087912087912,
"grad_norm": 0.02162204682826996,
"learning_rate": 0.0018762214983713355,
"loss": 1.8923,
"step": 597
},
{
"epoch": 1.3142857142857143,
"grad_norm": 0.02305612526834011,
"learning_rate": 0.0018737785016286646,
"loss": 1.9065,
"step": 598
},
{
"epoch": 1.3164835164835165,
"grad_norm": 0.023686420172452927,
"learning_rate": 0.0018713355048859937,
"loss": 2.5969,
"step": 599
},
{
"epoch": 1.3186813186813187,
"grad_norm": 0.5076853036880493,
"learning_rate": 0.0018688925081433226,
"loss": 2.2921,
"step": 600
},
{
"epoch": 1.3208791208791208,
"grad_norm": 0.04497206583619118,
"learning_rate": 0.0018664495114006517,
"loss": 2.311,
"step": 601
},
{
"epoch": 1.323076923076923,
"grad_norm": 0.02347046136856079,
"learning_rate": 0.0018640065146579806,
"loss": 1.7965,
"step": 602
},
{
"epoch": 1.3252747252747252,
"grad_norm": 0.6230455040931702,
"learning_rate": 0.0018615635179153097,
"loss": 2.1138,
"step": 603
},
{
"epoch": 1.3274725274725274,
"grad_norm": 0.02414635941386223,
"learning_rate": 0.0018591205211726386,
"loss": 3.0538,
"step": 604
},
{
"epoch": 1.3296703296703296,
"grad_norm": 1.0347962379455566,
"learning_rate": 0.0018566775244299677,
"loss": 2.2676,
"step": 605
},
{
"epoch": 1.3318681318681318,
"grad_norm": 1.049870491027832,
"learning_rate": 0.0018542345276872966,
"loss": 2.1718,
"step": 606
},
{
"epoch": 1.334065934065934,
"grad_norm": 0.15487191081047058,
"learning_rate": 0.0018517915309446257,
"loss": 2.5609,
"step": 607
},
{
"epoch": 1.3362637362637364,
"grad_norm": 0.0772487223148346,
"learning_rate": 0.0018493485342019546,
"loss": 1.9263,
"step": 608
},
{
"epoch": 1.3384615384615386,
"grad_norm": 0.09991439431905746,
"learning_rate": 0.0018469055374592835,
"loss": 2.5453,
"step": 609
},
{
"epoch": 1.3406593406593408,
"grad_norm": 0.0313388966023922,
"learning_rate": 0.0018444625407166126,
"loss": 2.3495,
"step": 610
},
{
"epoch": 1.342857142857143,
"grad_norm": 0.02239510416984558,
"learning_rate": 0.0018420195439739415,
"loss": 2.0539,
"step": 611
},
{
"epoch": 1.3450549450549452,
"grad_norm": 0.06697522103786469,
"learning_rate": 0.0018395765472312706,
"loss": 2.0646,
"step": 612
},
{
"epoch": 1.3472527472527474,
"grad_norm": 0.020611634477972984,
"learning_rate": 0.0018371335504885995,
"loss": 1.7765,
"step": 613
},
{
"epoch": 1.3494505494505495,
"grad_norm": 0.026640567928552628,
"learning_rate": 0.0018346905537459286,
"loss": 1.8829,
"step": 614
},
{
"epoch": 1.3516483516483517,
"grad_norm": 0.01940467394888401,
"learning_rate": 0.0018322475570032575,
"loss": 1.8313,
"step": 615
},
{
"epoch": 1.353846153846154,
"grad_norm": 0.11797212064266205,
"learning_rate": 0.0018298045602605866,
"loss": 1.9998,
"step": 616
},
{
"epoch": 1.3560439560439561,
"grad_norm": 0.049303680658340454,
"learning_rate": 0.0018273615635179155,
"loss": 2.1754,
"step": 617
},
{
"epoch": 1.3582417582417583,
"grad_norm": 0.02472326159477234,
"learning_rate": 0.0018249185667752444,
"loss": 2.1061,
"step": 618
},
{
"epoch": 1.3604395604395605,
"grad_norm": 0.013275641947984695,
"learning_rate": 0.0018224755700325735,
"loss": 2.5028,
"step": 619
},
{
"epoch": 1.3626373626373627,
"grad_norm": 0.02107204683125019,
"learning_rate": 0.0018200325732899024,
"loss": 3.4194,
"step": 620
},
{
"epoch": 1.3648351648351649,
"grad_norm": 0.02259068563580513,
"learning_rate": 0.0018175895765472315,
"loss": 2.3886,
"step": 621
},
{
"epoch": 1.367032967032967,
"grad_norm": 0.017324309796094894,
"learning_rate": 0.0018151465798045604,
"loss": 2.2509,
"step": 622
},
{
"epoch": 1.3692307692307693,
"grad_norm": 0.044232070446014404,
"learning_rate": 0.0018127035830618895,
"loss": 2.3841,
"step": 623
},
{
"epoch": 1.3714285714285714,
"grad_norm": 0.051778122782707214,
"learning_rate": 0.0018102605863192184,
"loss": 2.4665,
"step": 624
},
{
"epoch": 1.3736263736263736,
"grad_norm": 0.11066672950983047,
"learning_rate": 0.0018078175895765475,
"loss": 1.9729,
"step": 625
},
{
"epoch": 1.3758241758241758,
"grad_norm": 0.0341743640601635,
"learning_rate": 0.0018053745928338764,
"loss": 2.1637,
"step": 626
},
{
"epoch": 1.378021978021978,
"grad_norm": 0.026253828778862953,
"learning_rate": 0.0018029315960912053,
"loss": 2.3802,
"step": 627
},
{
"epoch": 1.3802197802197802,
"grad_norm": 0.03700072318315506,
"learning_rate": 0.0018004885993485344,
"loss": 2.2393,
"step": 628
},
{
"epoch": 1.3824175824175824,
"grad_norm": 0.12532836198806763,
"learning_rate": 0.0017980456026058633,
"loss": 2.114,
"step": 629
},
{
"epoch": 1.3846153846153846,
"grad_norm": 0.02072116732597351,
"learning_rate": 0.0017956026058631924,
"loss": 2.2134,
"step": 630
},
{
"epoch": 1.3868131868131868,
"grad_norm": 0.027101557701826096,
"learning_rate": 0.0017931596091205213,
"loss": 2.0769,
"step": 631
},
{
"epoch": 1.389010989010989,
"grad_norm": 0.026539552956819534,
"learning_rate": 0.0017907166123778504,
"loss": 2.2602,
"step": 632
},
{
"epoch": 1.3912087912087912,
"grad_norm": 0.05788382142782211,
"learning_rate": 0.0017882736156351793,
"loss": 2.81,
"step": 633
},
{
"epoch": 1.3934065934065933,
"grad_norm": 0.02818615920841694,
"learning_rate": 0.0017858306188925084,
"loss": 2.0146,
"step": 634
},
{
"epoch": 1.3956043956043955,
"grad_norm": 0.06294171512126923,
"learning_rate": 0.0017833876221498373,
"loss": 2.5079,
"step": 635
},
{
"epoch": 1.3978021978021977,
"grad_norm": 3.389960765838623,
"learning_rate": 0.0017809446254071664,
"loss": 2.7604,
"step": 636
},
{
"epoch": 1.4,
"grad_norm": 0.030672065913677216,
"learning_rate": 0.0017785016286644953,
"loss": 1.9493,
"step": 637
},
{
"epoch": 1.402197802197802,
"grad_norm": 0.8590113520622253,
"learning_rate": 0.0017760586319218242,
"loss": 2.3102,
"step": 638
},
{
"epoch": 1.4043956043956043,
"grad_norm": 0.029765766113996506,
"learning_rate": 0.0017736156351791533,
"loss": 2.8375,
"step": 639
},
{
"epoch": 1.4065934065934065,
"grad_norm": 0.020430050790309906,
"learning_rate": 0.0017711726384364822,
"loss": 2.1127,
"step": 640
},
{
"epoch": 1.4087912087912087,
"grad_norm": 0.06371467560529709,
"learning_rate": 0.0017687296416938113,
"loss": 1.7852,
"step": 641
},
{
"epoch": 1.4109890109890109,
"grad_norm": 0.02931356430053711,
"learning_rate": 0.0017662866449511402,
"loss": 2.1006,
"step": 642
},
{
"epoch": 1.413186813186813,
"grad_norm": 0.018102148547768593,
"learning_rate": 0.0017638436482084693,
"loss": 1.8666,
"step": 643
},
{
"epoch": 1.4153846153846155,
"grad_norm": 0.027719559147953987,
"learning_rate": 0.0017614006514657982,
"loss": 2.3553,
"step": 644
},
{
"epoch": 1.4175824175824177,
"grad_norm": 0.014392991550266743,
"learning_rate": 0.0017589576547231273,
"loss": 2.3634,
"step": 645
},
{
"epoch": 1.4197802197802198,
"grad_norm": 0.0159554835408926,
"learning_rate": 0.0017565146579804562,
"loss": 2.1707,
"step": 646
},
{
"epoch": 1.421978021978022,
"grad_norm": 0.06308163702487946,
"learning_rate": 0.001754071661237785,
"loss": 2.2157,
"step": 647
},
{
"epoch": 1.4241758241758242,
"grad_norm": 0.02242080308496952,
"learning_rate": 0.0017516286644951142,
"loss": 2.4075,
"step": 648
},
{
"epoch": 1.4263736263736264,
"grad_norm": 0.015121976844966412,
"learning_rate": 0.001749185667752443,
"loss": 1.9815,
"step": 649
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.026439322158694267,
"learning_rate": 0.0017467426710097722,
"loss": 2.001,
"step": 650
},
{
"epoch": 1.4307692307692308,
"grad_norm": 0.028246264904737473,
"learning_rate": 0.001744299674267101,
"loss": 2.4592,
"step": 651
},
{
"epoch": 1.432967032967033,
"grad_norm": 0.49988114833831787,
"learning_rate": 0.0017418566775244302,
"loss": 2.4171,
"step": 652
},
{
"epoch": 1.4351648351648352,
"grad_norm": 0.02563559263944626,
"learning_rate": 0.001739413680781759,
"loss": 1.9265,
"step": 653
},
{
"epoch": 1.4373626373626374,
"grad_norm": 0.04233551025390625,
"learning_rate": 0.0017369706840390882,
"loss": 1.8512,
"step": 654
},
{
"epoch": 1.4395604395604396,
"grad_norm": 0.07374123483896255,
"learning_rate": 0.001734527687296417,
"loss": 2.0358,
"step": 655
},
{
"epoch": 1.4417582417582417,
"grad_norm": 0.055339738726615906,
"learning_rate": 0.001732084690553746,
"loss": 2.5798,
"step": 656
},
{
"epoch": 1.443956043956044,
"grad_norm": 0.017600079998373985,
"learning_rate": 0.001729641693811075,
"loss": 2.1376,
"step": 657
},
{
"epoch": 1.4461538461538461,
"grad_norm": 0.01386289857327938,
"learning_rate": 0.001727198697068404,
"loss": 2.2118,
"step": 658
},
{
"epoch": 1.4483516483516483,
"grad_norm": 0.41913577914237976,
"learning_rate": 0.001724755700325733,
"loss": 2.5393,
"step": 659
},
{
"epoch": 1.4505494505494505,
"grad_norm": 0.018337829038500786,
"learning_rate": 0.001722312703583062,
"loss": 2.204,
"step": 660
},
{
"epoch": 1.4527472527472527,
"grad_norm": 0.02074161171913147,
"learning_rate": 0.001719869706840391,
"loss": 2.2901,
"step": 661
},
{
"epoch": 1.4549450549450549,
"grad_norm": 0.021658066660165787,
"learning_rate": 0.00171742671009772,
"loss": 2.1653,
"step": 662
},
{
"epoch": 1.457142857142857,
"grad_norm": 0.022709082812070847,
"learning_rate": 0.001714983713355049,
"loss": 2.1895,
"step": 663
},
{
"epoch": 1.4593406593406593,
"grad_norm": 0.022865472361445427,
"learning_rate": 0.001712540716612378,
"loss": 2.1155,
"step": 664
},
{
"epoch": 1.4615384615384617,
"grad_norm": 0.024059291929006577,
"learning_rate": 0.0017100977198697069,
"loss": 2.4078,
"step": 665
},
{
"epoch": 1.4637362637362639,
"grad_norm": 0.018568173050880432,
"learning_rate": 0.001707654723127036,
"loss": 1.7385,
"step": 666
},
{
"epoch": 1.465934065934066,
"grad_norm": 0.030566638335585594,
"learning_rate": 0.0017052117263843649,
"loss": 2.3417,
"step": 667
},
{
"epoch": 1.4681318681318682,
"grad_norm": 0.02127690054476261,
"learning_rate": 0.001702768729641694,
"loss": 1.6669,
"step": 668
},
{
"epoch": 1.4703296703296704,
"grad_norm": 0.021866438910365105,
"learning_rate": 0.0017003257328990229,
"loss": 2.1077,
"step": 669
},
{
"epoch": 1.4725274725274726,
"grad_norm": 0.023326337337493896,
"learning_rate": 0.001697882736156352,
"loss": 1.8293,
"step": 670
},
{
"epoch": 1.4747252747252748,
"grad_norm": 0.022072531282901764,
"learning_rate": 0.0016954397394136809,
"loss": 2.0046,
"step": 671
},
{
"epoch": 1.476923076923077,
"grad_norm": 0.018785560503602028,
"learning_rate": 0.00169299674267101,
"loss": 2.4428,
"step": 672
},
{
"epoch": 1.4791208791208792,
"grad_norm": 0.02191714383661747,
"learning_rate": 0.0016905537459283389,
"loss": 2.1456,
"step": 673
},
{
"epoch": 1.4813186813186814,
"grad_norm": 0.030434895306825638,
"learning_rate": 0.001688110749185668,
"loss": 1.8778,
"step": 674
},
{
"epoch": 1.4835164835164836,
"grad_norm": 0.019160758703947067,
"learning_rate": 0.0016856677524429969,
"loss": 2.858,
"step": 675
},
{
"epoch": 1.4857142857142858,
"grad_norm": 0.09490322321653366,
"learning_rate": 0.0016832247557003258,
"loss": 2.34,
"step": 676
},
{
"epoch": 1.487912087912088,
"grad_norm": 0.06465135514736176,
"learning_rate": 0.0016807817589576549,
"loss": 2.3777,
"step": 677
},
{
"epoch": 1.4901098901098901,
"grad_norm": 0.03797115013003349,
"learning_rate": 0.0016783387622149838,
"loss": 2.5814,
"step": 678
},
{
"epoch": 1.4923076923076923,
"grad_norm": 0.023469626903533936,
"learning_rate": 0.0016758957654723129,
"loss": 1.9645,
"step": 679
},
{
"epoch": 1.4945054945054945,
"grad_norm": 0.02829805761575699,
"learning_rate": 0.0016734527687296418,
"loss": 1.8644,
"step": 680
},
{
"epoch": 1.4967032967032967,
"grad_norm": 0.6024360060691833,
"learning_rate": 0.0016710097719869709,
"loss": 2.372,
"step": 681
},
{
"epoch": 1.498901098901099,
"grad_norm": 0.03728770464658737,
"learning_rate": 0.0016685667752442998,
"loss": 2.558,
"step": 682
},
{
"epoch": 1.501098901098901,
"grad_norm": 0.028321657329797745,
"learning_rate": 0.0016661237785016289,
"loss": 2.1466,
"step": 683
},
{
"epoch": 1.5032967032967033,
"grad_norm": 0.024511734023690224,
"learning_rate": 0.0016636807817589578,
"loss": 2.5315,
"step": 684
},
{
"epoch": 1.5054945054945055,
"grad_norm": 0.08865869790315628,
"learning_rate": 0.0016612377850162867,
"loss": 1.9722,
"step": 685
},
{
"epoch": 1.5076923076923077,
"grad_norm": 0.020004810765385628,
"learning_rate": 0.0016587947882736158,
"loss": 2.3034,
"step": 686
},
{
"epoch": 1.5098901098901099,
"grad_norm": 0.02396407350897789,
"learning_rate": 0.0016563517915309447,
"loss": 2.0389,
"step": 687
},
{
"epoch": 1.512087912087912,
"grad_norm": 0.06900710612535477,
"learning_rate": 0.0016539087947882738,
"loss": 2.7552,
"step": 688
},
{
"epoch": 1.5142857142857142,
"grad_norm": 0.01738635078072548,
"learning_rate": 0.0016514657980456027,
"loss": 2.4093,
"step": 689
},
{
"epoch": 1.5164835164835164,
"grad_norm": 0.02039428800344467,
"learning_rate": 0.0016490228013029318,
"loss": 2.144,
"step": 690
},
{
"epoch": 1.5186813186813186,
"grad_norm": 0.023936979472637177,
"learning_rate": 0.0016465798045602607,
"loss": 2.5239,
"step": 691
},
{
"epoch": 1.5208791208791208,
"grad_norm": 0.019143912941217422,
"learning_rate": 0.0016441368078175898,
"loss": 2.9828,
"step": 692
},
{
"epoch": 1.523076923076923,
"grad_norm": 0.014268501661717892,
"learning_rate": 0.0016416938110749187,
"loss": 2.3899,
"step": 693
},
{
"epoch": 1.5252747252747252,
"grad_norm": 0.019946718588471413,
"learning_rate": 0.0016392508143322475,
"loss": 2.3974,
"step": 694
},
{
"epoch": 1.5274725274725274,
"grad_norm": 0.01846800372004509,
"learning_rate": 0.0016368078175895767,
"loss": 2.0194,
"step": 695
},
{
"epoch": 1.5296703296703296,
"grad_norm": 0.045574020594358444,
"learning_rate": 0.0016343648208469056,
"loss": 2.5844,
"step": 696
},
{
"epoch": 1.5318681318681318,
"grad_norm": 0.024428313598036766,
"learning_rate": 0.0016319218241042347,
"loss": 2.3314,
"step": 697
},
{
"epoch": 1.534065934065934,
"grad_norm": 0.049928318709135056,
"learning_rate": 0.0016294788273615636,
"loss": 1.8789,
"step": 698
},
{
"epoch": 1.5362637362637361,
"grad_norm": 0.01646183803677559,
"learning_rate": 0.0016270358306188927,
"loss": 1.9459,
"step": 699
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.09247796982526779,
"learning_rate": 0.0016245928338762216,
"loss": 2.221,
"step": 700
},
{
"epoch": 1.5406593406593405,
"grad_norm": 0.03557046130299568,
"learning_rate": 0.0016221498371335507,
"loss": 2.8312,
"step": 701
},
{
"epoch": 1.5428571428571427,
"grad_norm": 0.04367535933852196,
"learning_rate": 0.0016197068403908796,
"loss": 2.0734,
"step": 702
},
{
"epoch": 1.545054945054945,
"grad_norm": 0.117167629301548,
"learning_rate": 0.0016172638436482087,
"loss": 2.1822,
"step": 703
},
{
"epoch": 1.5472527472527473,
"grad_norm": 0.02050991728901863,
"learning_rate": 0.0016148208469055376,
"loss": 2.1213,
"step": 704
},
{
"epoch": 1.5494505494505495,
"grad_norm": 0.24483846127986908,
"learning_rate": 0.0016123778501628664,
"loss": 2.3509,
"step": 705
},
{
"epoch": 1.5516483516483517,
"grad_norm": 0.019078144803643227,
"learning_rate": 0.0016099348534201956,
"loss": 3.2477,
"step": 706
},
{
"epoch": 1.5538461538461539,
"grad_norm": 0.022861536592245102,
"learning_rate": 0.0016074918566775244,
"loss": 1.8546,
"step": 707
},
{
"epoch": 1.556043956043956,
"grad_norm": 0.03003828227519989,
"learning_rate": 0.0016050488599348536,
"loss": 3.425,
"step": 708
},
{
"epoch": 1.5582417582417583,
"grad_norm": 0.017295774072408676,
"learning_rate": 0.0016026058631921824,
"loss": 1.9467,
"step": 709
},
{
"epoch": 1.5604395604395604,
"grad_norm": 0.10394047200679779,
"learning_rate": 0.0016001628664495116,
"loss": 2.4743,
"step": 710
},
{
"epoch": 1.5626373626373626,
"grad_norm": 0.02505568042397499,
"learning_rate": 0.0015977198697068404,
"loss": 2.2586,
"step": 711
},
{
"epoch": 1.5648351648351648,
"grad_norm": 0.01780625805258751,
"learning_rate": 0.0015952768729641696,
"loss": 2.2126,
"step": 712
},
{
"epoch": 1.567032967032967,
"grad_norm": 0.033080946654081345,
"learning_rate": 0.0015928338762214984,
"loss": 2.0316,
"step": 713
},
{
"epoch": 1.5692307692307692,
"grad_norm": 0.10725748538970947,
"learning_rate": 0.0015903908794788273,
"loss": 2.0459,
"step": 714
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.027366140857338905,
"learning_rate": 0.0015879478827361564,
"loss": 2.5619,
"step": 715
},
{
"epoch": 1.5736263736263736,
"grad_norm": 0.020192671567201614,
"learning_rate": 0.0015855048859934853,
"loss": 2.3389,
"step": 716
},
{
"epoch": 1.5758241758241758,
"grad_norm": 0.019907068461179733,
"learning_rate": 0.0015830618892508145,
"loss": 2.5348,
"step": 717
},
{
"epoch": 1.578021978021978,
"grad_norm": 0.056603699922561646,
"learning_rate": 0.0015806188925081433,
"loss": 2.6302,
"step": 718
},
{
"epoch": 1.5802197802197804,
"grad_norm": 0.5138797760009766,
"learning_rate": 0.0015781758957654725,
"loss": 2.613,
"step": 719
},
{
"epoch": 1.5824175824175826,
"grad_norm": 0.023572716861963272,
"learning_rate": 0.0015757328990228013,
"loss": 2.9208,
"step": 720
},
{
"epoch": 1.5846153846153848,
"grad_norm": 0.3275434374809265,
"learning_rate": 0.0015732899022801305,
"loss": 2.8074,
"step": 721
},
{
"epoch": 1.586813186813187,
"grad_norm": 0.022186795249581337,
"learning_rate": 0.0015708469055374593,
"loss": 2.0281,
"step": 722
},
{
"epoch": 1.5890109890109891,
"grad_norm": 0.0342952236533165,
"learning_rate": 0.0015684039087947882,
"loss": 2.2266,
"step": 723
},
{
"epoch": 1.5912087912087913,
"grad_norm": 0.658846914768219,
"learning_rate": 0.0015659609120521173,
"loss": 2.9207,
"step": 724
},
{
"epoch": 1.5934065934065935,
"grad_norm": 0.05985492840409279,
"learning_rate": 0.0015635179153094462,
"loss": 2.9798,
"step": 725
},
{
"epoch": 1.5956043956043957,
"grad_norm": 0.014591915532946587,
"learning_rate": 0.0015610749185667753,
"loss": 2.4443,
"step": 726
},
{
"epoch": 1.597802197802198,
"grad_norm": 0.18519966304302216,
"learning_rate": 0.0015586319218241042,
"loss": 2.8557,
"step": 727
},
{
"epoch": 1.6,
"grad_norm": 0.048469241708517075,
"learning_rate": 0.0015561889250814333,
"loss": 2.1187,
"step": 728
},
{
"epoch": 1.6021978021978023,
"grad_norm": 0.02856939099729061,
"learning_rate": 0.0015537459283387622,
"loss": 2.6369,
"step": 729
},
{
"epoch": 1.6043956043956045,
"grad_norm": 0.04865863174200058,
"learning_rate": 0.0015513029315960913,
"loss": 2.3832,
"step": 730
},
{
"epoch": 1.6065934065934067,
"grad_norm": 0.01675734482705593,
"learning_rate": 0.0015488599348534202,
"loss": 2.1958,
"step": 731
},
{
"epoch": 1.6087912087912088,
"grad_norm": 0.0252480860799551,
"learning_rate": 0.0015464169381107491,
"loss": 2.0348,
"step": 732
},
{
"epoch": 1.610989010989011,
"grad_norm": 0.017718374729156494,
"learning_rate": 0.0015439739413680782,
"loss": 2.0343,
"step": 733
},
{
"epoch": 1.6131868131868132,
"grad_norm": 0.06915155798196793,
"learning_rate": 0.0015415309446254071,
"loss": 2.8718,
"step": 734
},
{
"epoch": 1.6153846153846154,
"grad_norm": 0.022865720093250275,
"learning_rate": 0.0015390879478827362,
"loss": 1.8484,
"step": 735
},
{
"epoch": 1.6175824175824176,
"grad_norm": 0.021261505782604218,
"learning_rate": 0.0015366449511400651,
"loss": 2.3206,
"step": 736
},
{
"epoch": 1.6197802197802198,
"grad_norm": 0.21212172508239746,
"learning_rate": 0.0015342019543973942,
"loss": 2.3917,
"step": 737
},
{
"epoch": 1.621978021978022,
"grad_norm": 0.016649320721626282,
"learning_rate": 0.0015317589576547231,
"loss": 2.4186,
"step": 738
},
{
"epoch": 1.6241758241758242,
"grad_norm": 0.01972966641187668,
"learning_rate": 0.0015293159609120522,
"loss": 2.3252,
"step": 739
},
{
"epoch": 1.6263736263736264,
"grad_norm": 0.020486561581492424,
"learning_rate": 0.0015268729641693811,
"loss": 2.2769,
"step": 740
},
{
"epoch": 1.6285714285714286,
"grad_norm": 0.05244067683815956,
"learning_rate": 0.0015244299674267102,
"loss": 2.4784,
"step": 741
},
{
"epoch": 1.6307692307692307,
"grad_norm": 0.030573679134249687,
"learning_rate": 0.0015219869706840391,
"loss": 2.2807,
"step": 742
},
{
"epoch": 1.632967032967033,
"grad_norm": 0.01655000075697899,
"learning_rate": 0.001519543973941368,
"loss": 2.0555,
"step": 743
},
{
"epoch": 1.6351648351648351,
"grad_norm": 0.03997037559747696,
"learning_rate": 0.0015171009771986971,
"loss": 2.4018,
"step": 744
},
{
"epoch": 1.6373626373626373,
"grad_norm": 0.01896047778427601,
"learning_rate": 0.001514657980456026,
"loss": 1.8674,
"step": 745
},
{
"epoch": 1.6395604395604395,
"grad_norm": 0.07418808341026306,
"learning_rate": 0.0015122149837133551,
"loss": 2.2366,
"step": 746
},
{
"epoch": 1.6417582417582417,
"grad_norm": 1.8761250972747803,
"learning_rate": 0.001509771986970684,
"loss": 2.5624,
"step": 747
},
{
"epoch": 1.6439560439560439,
"grad_norm": 0.016410140320658684,
"learning_rate": 0.0015073289902280131,
"loss": 1.8619,
"step": 748
},
{
"epoch": 1.646153846153846,
"grad_norm": 0.23896360397338867,
"learning_rate": 0.001504885993485342,
"loss": 1.9981,
"step": 749
},
{
"epoch": 1.6483516483516483,
"grad_norm": 0.027410367503762245,
"learning_rate": 0.0015024429967426711,
"loss": 2.4737,
"step": 750
},
{
"epoch": 1.6505494505494505,
"grad_norm": 0.03904625028371811,
"learning_rate": 0.0015,
"loss": 2.229,
"step": 751
},
{
"epoch": 1.6527472527472526,
"grad_norm": 0.03253611549735069,
"learning_rate": 0.001497557003257329,
"loss": 2.0862,
"step": 752
},
{
"epoch": 1.6549450549450548,
"grad_norm": 0.04649863392114639,
"learning_rate": 0.001495114006514658,
"loss": 1.9857,
"step": 753
},
{
"epoch": 1.657142857142857,
"grad_norm": 0.04062139242887497,
"learning_rate": 0.001492671009771987,
"loss": 2.4295,
"step": 754
},
{
"epoch": 1.6593406593406592,
"grad_norm": 0.015858465805649757,
"learning_rate": 0.001490228013029316,
"loss": 2.2996,
"step": 755
},
{
"epoch": 1.6615384615384614,
"grad_norm": 0.033834271132946014,
"learning_rate": 0.001487785016286645,
"loss": 2.3486,
"step": 756
},
{
"epoch": 1.6637362637362636,
"grad_norm": 0.025404533371329308,
"learning_rate": 0.001485342019543974,
"loss": 2.3052,
"step": 757
},
{
"epoch": 1.6659340659340658,
"grad_norm": 0.024370385333895683,
"learning_rate": 0.001482899022801303,
"loss": 2.3658,
"step": 758
},
{
"epoch": 1.668131868131868,
"grad_norm": 0.079036183655262,
"learning_rate": 0.001480456026058632,
"loss": 2.3827,
"step": 759
},
{
"epoch": 1.6703296703296702,
"grad_norm": 0.035794686526060104,
"learning_rate": 0.001478013029315961,
"loss": 2.5318,
"step": 760
},
{
"epoch": 1.6725274725274726,
"grad_norm": 0.019976086914539337,
"learning_rate": 0.0014755700325732898,
"loss": 2.2118,
"step": 761
},
{
"epoch": 1.6747252747252748,
"grad_norm": 0.013210524804890156,
"learning_rate": 0.001473127035830619,
"loss": 2.1005,
"step": 762
},
{
"epoch": 1.676923076923077,
"grad_norm": 0.0185015257447958,
"learning_rate": 0.0014706840390879478,
"loss": 2.3593,
"step": 763
},
{
"epoch": 1.6791208791208792,
"grad_norm": 0.09070681035518646,
"learning_rate": 0.001468241042345277,
"loss": 2.2919,
"step": 764
},
{
"epoch": 1.6813186813186813,
"grad_norm": 0.02373500168323517,
"learning_rate": 0.0014657980456026058,
"loss": 1.7106,
"step": 765
},
{
"epoch": 1.6835164835164835,
"grad_norm": 0.030028127133846283,
"learning_rate": 0.001463355048859935,
"loss": 2.4783,
"step": 766
},
{
"epoch": 1.6857142857142857,
"grad_norm": 0.03654913976788521,
"learning_rate": 0.0014609120521172638,
"loss": 1.765,
"step": 767
},
{
"epoch": 1.687912087912088,
"grad_norm": 0.7053762078285217,
"learning_rate": 0.001458469055374593,
"loss": 2.817,
"step": 768
},
{
"epoch": 1.69010989010989,
"grad_norm": 0.02265395224094391,
"learning_rate": 0.0014560260586319218,
"loss": 2.1218,
"step": 769
},
{
"epoch": 1.6923076923076923,
"grad_norm": 0.055480122566223145,
"learning_rate": 0.001453583061889251,
"loss": 2.4019,
"step": 770
},
{
"epoch": 1.6945054945054945,
"grad_norm": 0.01911146566271782,
"learning_rate": 0.0014511400651465798,
"loss": 2.2388,
"step": 771
},
{
"epoch": 1.6967032967032967,
"grad_norm": 0.02012067846953869,
"learning_rate": 0.0014486970684039087,
"loss": 1.8873,
"step": 772
},
{
"epoch": 1.6989010989010989,
"grad_norm": 1.8038663864135742,
"learning_rate": 0.0014462540716612378,
"loss": 2.6876,
"step": 773
},
{
"epoch": 1.701098901098901,
"grad_norm": 0.021777093410491943,
"learning_rate": 0.0014438110749185667,
"loss": 2.2426,
"step": 774
},
{
"epoch": 1.7032967032967035,
"grad_norm": 0.02293497323989868,
"learning_rate": 0.0014413680781758958,
"loss": 2.0997,
"step": 775
},
{
"epoch": 1.7054945054945057,
"grad_norm": 0.025982249528169632,
"learning_rate": 0.0014389250814332247,
"loss": 2.757,
"step": 776
},
{
"epoch": 1.7076923076923078,
"grad_norm": 0.03637106344103813,
"learning_rate": 0.0014364820846905538,
"loss": 1.9177,
"step": 777
},
{
"epoch": 1.70989010989011,
"grad_norm": 0.020664863288402557,
"learning_rate": 0.0014340390879478827,
"loss": 1.7164,
"step": 778
},
{
"epoch": 1.7120879120879122,
"grad_norm": 0.7723676562309265,
"learning_rate": 0.0014315960912052118,
"loss": 2.3256,
"step": 779
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.19121606647968292,
"learning_rate": 0.0014291530944625407,
"loss": 2.9985,
"step": 780
},
{
"epoch": 1.7164835164835166,
"grad_norm": 0.16874554753303528,
"learning_rate": 0.0014267100977198696,
"loss": 2.3197,
"step": 781
},
{
"epoch": 1.7186813186813188,
"grad_norm": 0.018054982647299767,
"learning_rate": 0.0014242671009771987,
"loss": 1.9637,
"step": 782
},
{
"epoch": 1.720879120879121,
"grad_norm": 0.02764027565717697,
"learning_rate": 0.0014218241042345276,
"loss": 1.9819,
"step": 783
},
{
"epoch": 1.7230769230769232,
"grad_norm": 0.1550895720720291,
"learning_rate": 0.0014193811074918567,
"loss": 2.2046,
"step": 784
},
{
"epoch": 1.7252747252747254,
"grad_norm": 0.026083623990416527,
"learning_rate": 0.0014169381107491856,
"loss": 2.4969,
"step": 785
},
{
"epoch": 1.7274725274725276,
"grad_norm": 0.19525639712810516,
"learning_rate": 0.0014144951140065147,
"loss": 2.5478,
"step": 786
},
{
"epoch": 1.7296703296703297,
"grad_norm": 0.02185739390552044,
"learning_rate": 0.0014120521172638436,
"loss": 1.9449,
"step": 787
},
{
"epoch": 1.731868131868132,
"grad_norm": 0.06631511449813843,
"learning_rate": 0.0014096091205211727,
"loss": 2.2371,
"step": 788
},
{
"epoch": 1.7340659340659341,
"grad_norm": 0.03651295229792595,
"learning_rate": 0.0014071661237785016,
"loss": 2.3563,
"step": 789
},
{
"epoch": 1.7362637362637363,
"grad_norm": 0.02548600733280182,
"learning_rate": 0.0014047231270358305,
"loss": 2.3545,
"step": 790
},
{
"epoch": 1.7384615384615385,
"grad_norm": 0.3343101739883423,
"learning_rate": 0.0014022801302931596,
"loss": 3.2214,
"step": 791
},
{
"epoch": 1.7406593406593407,
"grad_norm": 0.02746954746544361,
"learning_rate": 0.0013998371335504885,
"loss": 2.0023,
"step": 792
},
{
"epoch": 1.7428571428571429,
"grad_norm": 0.01869889721274376,
"learning_rate": 0.0013973941368078176,
"loss": 2.5837,
"step": 793
},
{
"epoch": 1.745054945054945,
"grad_norm": 0.46588966250419617,
"learning_rate": 0.0013949511400651465,
"loss": 2.5131,
"step": 794
},
{
"epoch": 1.7472527472527473,
"grad_norm": 0.17031285166740417,
"learning_rate": 0.0013925081433224756,
"loss": 2.495,
"step": 795
},
{
"epoch": 1.7494505494505495,
"grad_norm": 0.04793461412191391,
"learning_rate": 0.0013900651465798045,
"loss": 2.1911,
"step": 796
},
{
"epoch": 1.7516483516483516,
"grad_norm": 0.0365927517414093,
"learning_rate": 0.0013876221498371336,
"loss": 2.7859,
"step": 797
},
{
"epoch": 1.7538461538461538,
"grad_norm": 0.017322303727269173,
"learning_rate": 0.0013851791530944625,
"loss": 1.8905,
"step": 798
},
{
"epoch": 1.756043956043956,
"grad_norm": 0.029578961431980133,
"learning_rate": 0.0013827361563517914,
"loss": 2.7304,
"step": 799
},
{
"epoch": 1.7582417582417582,
"grad_norm": 0.06277059763669968,
"learning_rate": 0.0013802931596091205,
"loss": 2.2052,
"step": 800
},
{
"epoch": 1.7604395604395604,
"grad_norm": 0.02017611637711525,
"learning_rate": 0.0013778501628664494,
"loss": 2.529,
"step": 801
},
{
"epoch": 1.7626373626373626,
"grad_norm": 0.02929922565817833,
"learning_rate": 0.0013754071661237785,
"loss": 2.4142,
"step": 802
},
{
"epoch": 1.7648351648351648,
"grad_norm": 0.04857508838176727,
"learning_rate": 0.0013729641693811074,
"loss": 2.5335,
"step": 803
},
{
"epoch": 1.767032967032967,
"grad_norm": 0.0210055410861969,
"learning_rate": 0.0013705211726384365,
"loss": 1.929,
"step": 804
},
{
"epoch": 1.7692307692307692,
"grad_norm": 0.018840303644537926,
"learning_rate": 0.0013680781758957654,
"loss": 2.1591,
"step": 805
},
{
"epoch": 1.7714285714285714,
"grad_norm": 0.06925947219133377,
"learning_rate": 0.0013656351791530945,
"loss": 2.0311,
"step": 806
},
{
"epoch": 1.7736263736263735,
"grad_norm": 0.023167254403233528,
"learning_rate": 0.0013631921824104234,
"loss": 2.2917,
"step": 807
},
{
"epoch": 1.7758241758241757,
"grad_norm": 0.1454468071460724,
"learning_rate": 0.0013607491856677525,
"loss": 2.0322,
"step": 808
},
{
"epoch": 1.778021978021978,
"grad_norm": 0.036466170102357864,
"learning_rate": 0.0013583061889250814,
"loss": 2.3067,
"step": 809
},
{
"epoch": 1.7802197802197801,
"grad_norm": 0.3248104155063629,
"learning_rate": 0.0013558631921824103,
"loss": 1.9673,
"step": 810
},
{
"epoch": 1.7824175824175823,
"grad_norm": 0.01914435438811779,
"learning_rate": 0.0013534201954397394,
"loss": 2.1876,
"step": 811
},
{
"epoch": 1.7846153846153845,
"grad_norm": 0.028510965406894684,
"learning_rate": 0.0013509771986970683,
"loss": 1.7274,
"step": 812
},
{
"epoch": 1.7868131868131867,
"grad_norm": 0.018193287774920464,
"learning_rate": 0.0013485342019543974,
"loss": 2.3518,
"step": 813
},
{
"epoch": 1.7890109890109889,
"grad_norm": 0.38431188464164734,
"learning_rate": 0.0013460912052117263,
"loss": 2.2638,
"step": 814
},
{
"epoch": 1.791208791208791,
"grad_norm": 0.020502077415585518,
"learning_rate": 0.0013436482084690554,
"loss": 1.9018,
"step": 815
},
{
"epoch": 1.7934065934065933,
"grad_norm": 0.0531279556453228,
"learning_rate": 0.0013412052117263843,
"loss": 2.3438,
"step": 816
},
{
"epoch": 1.7956043956043954,
"grad_norm": 0.23906473815441132,
"learning_rate": 0.0013387622149837134,
"loss": 2.3462,
"step": 817
},
{
"epoch": 1.7978021978021979,
"grad_norm": 0.02951381541788578,
"learning_rate": 0.0013363192182410423,
"loss": 2.9358,
"step": 818
},
{
"epoch": 1.8,
"grad_norm": 0.018911249935626984,
"learning_rate": 0.0013338762214983712,
"loss": 2.084,
"step": 819
},
{
"epoch": 1.8021978021978022,
"grad_norm": 0.04316522926092148,
"learning_rate": 0.0013314332247557003,
"loss": 2.3519,
"step": 820
},
{
"epoch": 1.8043956043956044,
"grad_norm": 0.04044129326939583,
"learning_rate": 0.0013289902280130292,
"loss": 2.4669,
"step": 821
},
{
"epoch": 1.8065934065934066,
"grad_norm": 0.025719380006194115,
"learning_rate": 0.0013265472312703583,
"loss": 2.4084,
"step": 822
},
{
"epoch": 1.8087912087912088,
"grad_norm": 0.02503850869834423,
"learning_rate": 0.0013241042345276872,
"loss": 2.6237,
"step": 823
},
{
"epoch": 1.810989010989011,
"grad_norm": 0.022818664088845253,
"learning_rate": 0.0013216612377850163,
"loss": 1.9123,
"step": 824
},
{
"epoch": 1.8131868131868132,
"grad_norm": 0.5526257157325745,
"learning_rate": 0.0013192182410423452,
"loss": 2.1784,
"step": 825
},
{
"epoch": 1.8153846153846154,
"grad_norm": 0.04064784571528435,
"learning_rate": 0.0013167752442996743,
"loss": 2.3223,
"step": 826
},
{
"epoch": 1.8175824175824176,
"grad_norm": 0.02771565318107605,
"learning_rate": 0.0013143322475570032,
"loss": 2.174,
"step": 827
},
{
"epoch": 1.8197802197802198,
"grad_norm": 0.027570048347115517,
"learning_rate": 0.0013118892508143323,
"loss": 2.1516,
"step": 828
},
{
"epoch": 1.821978021978022,
"grad_norm": 0.04362328350543976,
"learning_rate": 0.0013094462540716614,
"loss": 1.8403,
"step": 829
},
{
"epoch": 1.8241758241758241,
"grad_norm": 0.022229352965950966,
"learning_rate": 0.0013070032573289903,
"loss": 2.1572,
"step": 830
},
{
"epoch": 1.8263736263736263,
"grad_norm": 0.1754000037908554,
"learning_rate": 0.0013045602605863194,
"loss": 2.3761,
"step": 831
},
{
"epoch": 1.8285714285714287,
"grad_norm": 0.020969560369849205,
"learning_rate": 0.0013021172638436483,
"loss": 2.1617,
"step": 832
},
{
"epoch": 1.830769230769231,
"grad_norm": 0.03290945291519165,
"learning_rate": 0.0012996742671009772,
"loss": 2.2782,
"step": 833
},
{
"epoch": 1.8329670329670331,
"grad_norm": 0.05761655420064926,
"learning_rate": 0.0012972312703583063,
"loss": 2.2092,
"step": 834
},
{
"epoch": 1.8351648351648353,
"grad_norm": 0.017952682450413704,
"learning_rate": 0.0012947882736156352,
"loss": 1.9448,
"step": 835
},
{
"epoch": 1.8373626373626375,
"grad_norm": 0.01727917790412903,
"learning_rate": 0.0012923452768729643,
"loss": 1.8934,
"step": 836
},
{
"epoch": 1.8395604395604397,
"grad_norm": 0.060262586921453476,
"learning_rate": 0.0012899022801302932,
"loss": 2.3489,
"step": 837
},
{
"epoch": 1.8417582417582419,
"grad_norm": 0.021315086632966995,
"learning_rate": 0.0012874592833876223,
"loss": 2.2992,
"step": 838
},
{
"epoch": 1.843956043956044,
"grad_norm": 0.09320878982543945,
"learning_rate": 0.0012850162866449512,
"loss": 2.2567,
"step": 839
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.0855710506439209,
"learning_rate": 0.0012825732899022803,
"loss": 2.6673,
"step": 840
},
{
"epoch": 1.8483516483516484,
"grad_norm": 0.022635443136096,
"learning_rate": 0.0012801302931596092,
"loss": 1.9466,
"step": 841
},
{
"epoch": 1.8505494505494506,
"grad_norm": 0.9853588938713074,
"learning_rate": 0.0012776872964169383,
"loss": 2.2651,
"step": 842
},
{
"epoch": 1.8527472527472528,
"grad_norm": 0.4136480689048767,
"learning_rate": 0.0012752442996742672,
"loss": 2.3777,
"step": 843
},
{
"epoch": 1.854945054945055,
"grad_norm": 0.02005811035633087,
"learning_rate": 0.001272801302931596,
"loss": 1.9171,
"step": 844
},
{
"epoch": 1.8571428571428572,
"grad_norm": 0.04091151803731918,
"learning_rate": 0.0012703583061889252,
"loss": 2.6457,
"step": 845
},
{
"epoch": 1.8593406593406594,
"grad_norm": 0.020348239690065384,
"learning_rate": 0.001267915309446254,
"loss": 2.1558,
"step": 846
},
{
"epoch": 1.8615384615384616,
"grad_norm": 0.6916945576667786,
"learning_rate": 0.0012654723127035832,
"loss": 2.2883,
"step": 847
},
{
"epoch": 1.8637362637362638,
"grad_norm": 0.02693920210003853,
"learning_rate": 0.001263029315960912,
"loss": 2.4018,
"step": 848
},
{
"epoch": 1.865934065934066,
"grad_norm": 0.03278215974569321,
"learning_rate": 0.0012605863192182412,
"loss": 2.606,
"step": 849
},
{
"epoch": 1.8681318681318682,
"grad_norm": 0.015078420750796795,
"learning_rate": 0.00125814332247557,
"loss": 2.1508,
"step": 850
},
{
"epoch": 1.8703296703296703,
"grad_norm": 0.02730582281947136,
"learning_rate": 0.0012557003257328992,
"loss": 2.0825,
"step": 851
},
{
"epoch": 1.8725274725274725,
"grad_norm": 0.22723990678787231,
"learning_rate": 0.001253257328990228,
"loss": 2.2218,
"step": 852
},
{
"epoch": 1.8747252747252747,
"grad_norm": 0.04788484796881676,
"learning_rate": 0.001250814332247557,
"loss": 2.3423,
"step": 853
},
{
"epoch": 1.876923076923077,
"grad_norm": 0.025127051398158073,
"learning_rate": 0.001248371335504886,
"loss": 2.4592,
"step": 854
},
{
"epoch": 1.879120879120879,
"grad_norm": 0.03162353113293648,
"learning_rate": 0.001245928338762215,
"loss": 2.278,
"step": 855
},
{
"epoch": 1.8813186813186813,
"grad_norm": 0.01992758736014366,
"learning_rate": 0.001243485342019544,
"loss": 2.4541,
"step": 856
},
{
"epoch": 1.8835164835164835,
"grad_norm": 0.09057781845331192,
"learning_rate": 0.001241042345276873,
"loss": 3.11,
"step": 857
},
{
"epoch": 1.8857142857142857,
"grad_norm": 0.059875186532735825,
"learning_rate": 0.001238599348534202,
"loss": 2.1481,
"step": 858
},
{
"epoch": 1.8879120879120879,
"grad_norm": 0.020253119990229607,
"learning_rate": 0.001236156351791531,
"loss": 2.5706,
"step": 859
},
{
"epoch": 1.89010989010989,
"grad_norm": 0.02147708646953106,
"learning_rate": 0.00123371335504886,
"loss": 1.9418,
"step": 860
},
{
"epoch": 1.8923076923076922,
"grad_norm": 0.018997633829712868,
"learning_rate": 0.001231270358306189,
"loss": 2.1362,
"step": 861
},
{
"epoch": 1.8945054945054944,
"grad_norm": 0.02490854263305664,
"learning_rate": 0.0012288273615635179,
"loss": 2.2146,
"step": 862
},
{
"epoch": 1.8967032967032966,
"grad_norm": 0.027262361720204353,
"learning_rate": 0.001226384364820847,
"loss": 1.9707,
"step": 863
},
{
"epoch": 1.8989010989010988,
"grad_norm": 0.02658863551914692,
"learning_rate": 0.0012239413680781759,
"loss": 1.9156,
"step": 864
},
{
"epoch": 1.901098901098901,
"grad_norm": 0.03517833352088928,
"learning_rate": 0.001221498371335505,
"loss": 2.1844,
"step": 865
},
{
"epoch": 1.9032967032967032,
"grad_norm": 0.017088770866394043,
"learning_rate": 0.0012190553745928339,
"loss": 1.5344,
"step": 866
},
{
"epoch": 1.9054945054945054,
"grad_norm": 1.013632893562317,
"learning_rate": 0.001216612377850163,
"loss": 2.7897,
"step": 867
},
{
"epoch": 1.9076923076923076,
"grad_norm": 0.037594038993120193,
"learning_rate": 0.0012141693811074919,
"loss": 2.2217,
"step": 868
},
{
"epoch": 1.9098901098901098,
"grad_norm": 0.021701455116271973,
"learning_rate": 0.001211726384364821,
"loss": 2.6411,
"step": 869
},
{
"epoch": 1.912087912087912,
"grad_norm": 0.020081305876374245,
"learning_rate": 0.0012092833876221499,
"loss": 2.1875,
"step": 870
},
{
"epoch": 1.9142857142857141,
"grad_norm": 0.08792126178741455,
"learning_rate": 0.0012068403908794788,
"loss": 2.886,
"step": 871
},
{
"epoch": 1.9164835164835163,
"grad_norm": 0.28754520416259766,
"learning_rate": 0.0012043973941368079,
"loss": 2.3883,
"step": 872
},
{
"epoch": 1.9186813186813185,
"grad_norm": 0.042895324528217316,
"learning_rate": 0.0012019543973941368,
"loss": 2.0901,
"step": 873
},
{
"epoch": 1.9208791208791207,
"grad_norm": 0.018038902431726456,
"learning_rate": 0.0011995114006514659,
"loss": 2.3645,
"step": 874
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.019804934039711952,
"learning_rate": 0.0011970684039087948,
"loss": 1.8583,
"step": 875
},
{
"epoch": 1.9252747252747253,
"grad_norm": 0.02688548155128956,
"learning_rate": 0.0011946254071661239,
"loss": 2.0662,
"step": 876
},
{
"epoch": 1.9274725274725275,
"grad_norm": 0.022085871547460556,
"learning_rate": 0.0011921824104234528,
"loss": 2.1169,
"step": 877
},
{
"epoch": 1.9296703296703297,
"grad_norm": 0.015348690561950207,
"learning_rate": 0.001189739413680782,
"loss": 2.0945,
"step": 878
},
{
"epoch": 1.9318681318681319,
"grad_norm": 0.028527235612273216,
"learning_rate": 0.0011872964169381108,
"loss": 1.7392,
"step": 879
},
{
"epoch": 1.934065934065934,
"grad_norm": 0.04580596089363098,
"learning_rate": 0.00118485342019544,
"loss": 2.0234,
"step": 880
},
{
"epoch": 1.9362637362637363,
"grad_norm": 0.015356466174125671,
"learning_rate": 0.0011824104234527688,
"loss": 1.7478,
"step": 881
},
{
"epoch": 1.9384615384615385,
"grad_norm": 0.8543334603309631,
"learning_rate": 0.0011799674267100977,
"loss": 2.2785,
"step": 882
},
{
"epoch": 1.9406593406593406,
"grad_norm": 0.016521112993359566,
"learning_rate": 0.0011775244299674268,
"loss": 2.4031,
"step": 883
},
{
"epoch": 1.9428571428571428,
"grad_norm": 0.16791580617427826,
"learning_rate": 0.0011750814332247557,
"loss": 1.8242,
"step": 884
},
{
"epoch": 1.945054945054945,
"grad_norm": 0.022984879091382027,
"learning_rate": 0.0011726384364820848,
"loss": 2.2155,
"step": 885
},
{
"epoch": 1.9472527472527472,
"grad_norm": 0.04192487895488739,
"learning_rate": 0.0011701954397394137,
"loss": 2.1318,
"step": 886
},
{
"epoch": 1.9494505494505494,
"grad_norm": 0.35755935311317444,
"learning_rate": 0.0011677524429967428,
"loss": 2.3566,
"step": 887
},
{
"epoch": 1.9516483516483516,
"grad_norm": 0.018166419118642807,
"learning_rate": 0.0011653094462540717,
"loss": 1.9839,
"step": 888
},
{
"epoch": 1.953846153846154,
"grad_norm": 0.036898281425237656,
"learning_rate": 0.0011628664495114008,
"loss": 1.8559,
"step": 889
},
{
"epoch": 1.9560439560439562,
"grad_norm": 0.030428852885961533,
"learning_rate": 0.0011604234527687297,
"loss": 2.2783,
"step": 890
},
{
"epoch": 1.9582417582417584,
"grad_norm": 0.02016536518931389,
"learning_rate": 0.0011579804560260586,
"loss": 2.3045,
"step": 891
},
{
"epoch": 1.9604395604395606,
"grad_norm": 1.5722975730895996,
"learning_rate": 0.0011555374592833877,
"loss": 2.1986,
"step": 892
},
{
"epoch": 1.9626373626373628,
"grad_norm": 0.022676756605505943,
"learning_rate": 0.0011530944625407166,
"loss": 2.2735,
"step": 893
},
{
"epoch": 1.964835164835165,
"grad_norm": 0.02780989557504654,
"learning_rate": 0.0011506514657980457,
"loss": 1.8358,
"step": 894
},
{
"epoch": 1.9670329670329672,
"grad_norm": 0.0158417746424675,
"learning_rate": 0.0011482084690553746,
"loss": 1.8268,
"step": 895
},
{
"epoch": 1.9692307692307693,
"grad_norm": 0.019312061369419098,
"learning_rate": 0.0011457654723127037,
"loss": 1.9068,
"step": 896
},
{
"epoch": 1.9714285714285715,
"grad_norm": 0.08078934252262115,
"learning_rate": 0.0011433224755700326,
"loss": 2.1381,
"step": 897
},
{
"epoch": 1.9736263736263737,
"grad_norm": 0.3867344856262207,
"learning_rate": 0.0011408794788273617,
"loss": 2.6698,
"step": 898
},
{
"epoch": 1.975824175824176,
"grad_norm": 0.028593968600034714,
"learning_rate": 0.0011384364820846906,
"loss": 1.9593,
"step": 899
},
{
"epoch": 1.978021978021978,
"grad_norm": 0.02338896133005619,
"learning_rate": 0.0011359934853420195,
"loss": 2.2418,
"step": 900
},
{
"epoch": 1.9802197802197803,
"grad_norm": 0.03455771133303642,
"learning_rate": 0.0011335504885993486,
"loss": 2.2002,
"step": 901
},
{
"epoch": 1.9824175824175825,
"grad_norm": 0.04142335057258606,
"learning_rate": 0.0011311074918566775,
"loss": 2.2467,
"step": 902
},
{
"epoch": 1.9846153846153847,
"grad_norm": 0.631840705871582,
"learning_rate": 0.0011286644951140066,
"loss": 2.531,
"step": 903
},
{
"epoch": 1.9868131868131869,
"grad_norm": 0.1804543137550354,
"learning_rate": 0.0011262214983713355,
"loss": 2.4949,
"step": 904
},
{
"epoch": 1.989010989010989,
"grad_norm": 0.02836310677230358,
"learning_rate": 0.0011237785016286646,
"loss": 2.4418,
"step": 905
},
{
"epoch": 1.9912087912087912,
"grad_norm": 0.06665114313364029,
"learning_rate": 0.0011213355048859935,
"loss": 2.2426,
"step": 906
},
{
"epoch": 1.9934065934065934,
"grad_norm": 0.037149034440517426,
"learning_rate": 0.0011188925081433226,
"loss": 2.3581,
"step": 907
},
{
"epoch": 1.9956043956043956,
"grad_norm": 0.04913489893078804,
"learning_rate": 0.0011164495114006515,
"loss": 2.0737,
"step": 908
},
{
"epoch": 1.9978021978021978,
"grad_norm": 0.5675652623176575,
"learning_rate": 0.0011140065146579806,
"loss": 2.0931,
"step": 909
},
{
"epoch": 2.0,
"grad_norm": 0.022958753630518913,
"learning_rate": 0.0011115635179153095,
"loss": 1.7834,
"step": 910
},
{
"epoch": 2.002197802197802,
"grad_norm": 0.09967091679573059,
"learning_rate": 0.0011091205211726384,
"loss": 2.2945,
"step": 911
},
{
"epoch": 2.0043956043956044,
"grad_norm": 0.032395899295806885,
"learning_rate": 0.0011066775244299675,
"loss": 1.7992,
"step": 912
},
{
"epoch": 2.0065934065934066,
"grad_norm": 0.023649347946047783,
"learning_rate": 0.0011042345276872964,
"loss": 2.2344,
"step": 913
},
{
"epoch": 2.0087912087912088,
"grad_norm": 0.09626628458499908,
"learning_rate": 0.0011017915309446255,
"loss": 2.2377,
"step": 914
},
{
"epoch": 2.010989010989011,
"grad_norm": 0.04631178453564644,
"learning_rate": 0.0010993485342019544,
"loss": 2.0251,
"step": 915
},
{
"epoch": 2.013186813186813,
"grad_norm": 0.0292007215321064,
"learning_rate": 0.0010969055374592835,
"loss": 2.0317,
"step": 916
},
{
"epoch": 2.0153846153846153,
"grad_norm": 0.6256979703903198,
"learning_rate": 0.0010944625407166124,
"loss": 2.1063,
"step": 917
},
{
"epoch": 2.0175824175824175,
"grad_norm": 0.02347748540341854,
"learning_rate": 0.0010920195439739415,
"loss": 2.0575,
"step": 918
},
{
"epoch": 2.0197802197802197,
"grad_norm": 0.03525625541806221,
"learning_rate": 0.0010895765472312704,
"loss": 1.9979,
"step": 919
},
{
"epoch": 2.021978021978022,
"grad_norm": 0.026730777695775032,
"learning_rate": 0.0010871335504885993,
"loss": 2.6205,
"step": 920
},
{
"epoch": 2.024175824175824,
"grad_norm": 0.01706955023109913,
"learning_rate": 0.0010846905537459284,
"loss": 2.467,
"step": 921
},
{
"epoch": 2.0263736263736263,
"grad_norm": 0.015380855649709702,
"learning_rate": 0.0010822475570032573,
"loss": 1.8684,
"step": 922
},
{
"epoch": 2.0285714285714285,
"grad_norm": 0.020673319697380066,
"learning_rate": 0.0010798045602605864,
"loss": 2.4991,
"step": 923
},
{
"epoch": 2.0307692307692307,
"grad_norm": 1.6947705745697021,
"learning_rate": 0.0010773615635179153,
"loss": 2.0735,
"step": 924
},
{
"epoch": 2.032967032967033,
"grad_norm": 0.5256295204162598,
"learning_rate": 0.0010749185667752444,
"loss": 1.8702,
"step": 925
},
{
"epoch": 2.035164835164835,
"grad_norm": 0.03467399999499321,
"learning_rate": 0.0010724755700325733,
"loss": 2.784,
"step": 926
},
{
"epoch": 2.0373626373626372,
"grad_norm": 0.021759606897830963,
"learning_rate": 0.0010700325732899024,
"loss": 1.9501,
"step": 927
},
{
"epoch": 2.0395604395604394,
"grad_norm": 0.019610080868005753,
"learning_rate": 0.0010675895765472313,
"loss": 1.9421,
"step": 928
},
{
"epoch": 2.0417582417582416,
"grad_norm": 0.016130128875374794,
"learning_rate": 0.0010651465798045602,
"loss": 1.6221,
"step": 929
},
{
"epoch": 2.043956043956044,
"grad_norm": 0.1186121255159378,
"learning_rate": 0.0010627035830618893,
"loss": 3.0164,
"step": 930
},
{
"epoch": 2.046153846153846,
"grad_norm": 0.029710182920098305,
"learning_rate": 0.0010602605863192182,
"loss": 1.785,
"step": 931
},
{
"epoch": 2.048351648351648,
"grad_norm": 0.02839326672255993,
"learning_rate": 0.0010578175895765473,
"loss": 1.8494,
"step": 932
},
{
"epoch": 2.0505494505494504,
"grad_norm": 0.021341705694794655,
"learning_rate": 0.0010553745928338762,
"loss": 2.2259,
"step": 933
},
{
"epoch": 2.0527472527472526,
"grad_norm": 0.015135948546230793,
"learning_rate": 0.0010529315960912053,
"loss": 1.6042,
"step": 934
},
{
"epoch": 2.0549450549450547,
"grad_norm": 0.018283462151885033,
"learning_rate": 0.0010504885993485342,
"loss": 2.1276,
"step": 935
},
{
"epoch": 2.057142857142857,
"grad_norm": 0.0177579615265131,
"learning_rate": 0.0010480456026058633,
"loss": 2.3866,
"step": 936
},
{
"epoch": 2.059340659340659,
"grad_norm": 0.01559323538094759,
"learning_rate": 0.0010456026058631922,
"loss": 1.6834,
"step": 937
},
{
"epoch": 2.0615384615384613,
"grad_norm": 0.027492158114910126,
"learning_rate": 0.0010431596091205213,
"loss": 1.9854,
"step": 938
},
{
"epoch": 2.0637362637362635,
"grad_norm": 0.03479871153831482,
"learning_rate": 0.0010407166123778502,
"loss": 2.2403,
"step": 939
},
{
"epoch": 2.065934065934066,
"grad_norm": 0.02272411622107029,
"learning_rate": 0.001038273615635179,
"loss": 2.2557,
"step": 940
},
{
"epoch": 2.0681318681318683,
"grad_norm": 0.03517911583185196,
"learning_rate": 0.0010358306188925082,
"loss": 1.8009,
"step": 941
},
{
"epoch": 2.0703296703296705,
"grad_norm": 0.03971957787871361,
"learning_rate": 0.001033387622149837,
"loss": 2.3212,
"step": 942
},
{
"epoch": 2.0725274725274727,
"grad_norm": 0.022784944623708725,
"learning_rate": 0.0010309446254071662,
"loss": 1.9421,
"step": 943
},
{
"epoch": 2.074725274725275,
"grad_norm": 0.028831766918301582,
"learning_rate": 0.001028501628664495,
"loss": 2.0278,
"step": 944
},
{
"epoch": 2.076923076923077,
"grad_norm": 0.020596252754330635,
"learning_rate": 0.0010260586319218242,
"loss": 1.9585,
"step": 945
},
{
"epoch": 2.0791208791208793,
"grad_norm": 0.0398065485060215,
"learning_rate": 0.001023615635179153,
"loss": 2.1131,
"step": 946
},
{
"epoch": 2.0813186813186815,
"grad_norm": 0.0849858969449997,
"learning_rate": 0.0010211726384364822,
"loss": 1.9648,
"step": 947
},
{
"epoch": 2.0835164835164837,
"grad_norm": 0.03697577118873596,
"learning_rate": 0.001018729641693811,
"loss": 2.1714,
"step": 948
},
{
"epoch": 2.085714285714286,
"grad_norm": 0.0196391474455595,
"learning_rate": 0.00101628664495114,
"loss": 1.8086,
"step": 949
},
{
"epoch": 2.087912087912088,
"grad_norm": 0.05924411118030548,
"learning_rate": 0.001013843648208469,
"loss": 2.1445,
"step": 950
},
{
"epoch": 2.0901098901098902,
"grad_norm": 0.02788875438272953,
"learning_rate": 0.001011400651465798,
"loss": 2.0952,
"step": 951
},
{
"epoch": 2.0923076923076924,
"grad_norm": 0.6436755061149597,
"learning_rate": 0.001008957654723127,
"loss": 2.2464,
"step": 952
},
{
"epoch": 2.0945054945054946,
"grad_norm": 0.01867023855447769,
"learning_rate": 0.001006514657980456,
"loss": 2.3073,
"step": 953
},
{
"epoch": 2.096703296703297,
"grad_norm": 0.04052765294909477,
"learning_rate": 0.001004071661237785,
"loss": 2.2778,
"step": 954
},
{
"epoch": 2.098901098901099,
"grad_norm": 0.01885545626282692,
"learning_rate": 0.001001628664495114,
"loss": 2.0173,
"step": 955
},
{
"epoch": 2.101098901098901,
"grad_norm": 0.0724204033613205,
"learning_rate": 0.000999185667752443,
"loss": 2.2258,
"step": 956
},
{
"epoch": 2.1032967032967034,
"grad_norm": 0.032372914254665375,
"learning_rate": 0.000996742671009772,
"loss": 1.9503,
"step": 957
},
{
"epoch": 2.1054945054945056,
"grad_norm": 0.08785288035869598,
"learning_rate": 0.0009942996742671008,
"loss": 1.9547,
"step": 958
},
{
"epoch": 2.1076923076923078,
"grad_norm": 0.06754583865404129,
"learning_rate": 0.00099185667752443,
"loss": 2.0591,
"step": 959
},
{
"epoch": 2.10989010989011,
"grad_norm": 0.05109477788209915,
"learning_rate": 0.0009894136807817588,
"loss": 2.3998,
"step": 960
},
{
"epoch": 2.112087912087912,
"grad_norm": 0.022777283564209938,
"learning_rate": 0.000986970684039088,
"loss": 2.3704,
"step": 961
},
{
"epoch": 2.1142857142857143,
"grad_norm": 0.025591352954506874,
"learning_rate": 0.0009845276872964168,
"loss": 2.2575,
"step": 962
},
{
"epoch": 2.1164835164835165,
"grad_norm": 0.033540625125169754,
"learning_rate": 0.000982084690553746,
"loss": 2.5659,
"step": 963
},
{
"epoch": 2.1186813186813187,
"grad_norm": 0.030495675280690193,
"learning_rate": 0.0009796416938110748,
"loss": 2.2484,
"step": 964
},
{
"epoch": 2.120879120879121,
"grad_norm": 0.041331950575113297,
"learning_rate": 0.000977198697068404,
"loss": 1.738,
"step": 965
},
{
"epoch": 2.123076923076923,
"grad_norm": 0.024948639795184135,
"learning_rate": 0.0009747557003257328,
"loss": 2.2673,
"step": 966
},
{
"epoch": 2.1252747252747253,
"grad_norm": 0.1429746001958847,
"learning_rate": 0.0009723127035830618,
"loss": 2.9663,
"step": 967
},
{
"epoch": 2.1274725274725275,
"grad_norm": 0.024864574894309044,
"learning_rate": 0.0009698697068403908,
"loss": 1.9134,
"step": 968
},
{
"epoch": 2.1296703296703297,
"grad_norm": 0.018605245277285576,
"learning_rate": 0.0009674267100977198,
"loss": 1.8088,
"step": 969
},
{
"epoch": 2.131868131868132,
"grad_norm": 0.015809573233127594,
"learning_rate": 0.0009649837133550488,
"loss": 2.0848,
"step": 970
},
{
"epoch": 2.134065934065934,
"grad_norm": 0.030008208006620407,
"learning_rate": 0.0009625407166123778,
"loss": 2.03,
"step": 971
},
{
"epoch": 2.1362637362637362,
"grad_norm": 0.017309488728642464,
"learning_rate": 0.0009600977198697067,
"loss": 2.6465,
"step": 972
},
{
"epoch": 2.1384615384615384,
"grad_norm": 0.022745635360479355,
"learning_rate": 0.0009576547231270357,
"loss": 2.1206,
"step": 973
},
{
"epoch": 2.1406593406593406,
"grad_norm": 0.03800744563341141,
"learning_rate": 0.0009552117263843647,
"loss": 2.0333,
"step": 974
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.017299920320510864,
"learning_rate": 0.0009527687296416937,
"loss": 2.3047,
"step": 975
},
{
"epoch": 2.145054945054945,
"grad_norm": 0.016663186252117157,
"learning_rate": 0.0009503257328990227,
"loss": 2.5083,
"step": 976
},
{
"epoch": 2.147252747252747,
"grad_norm": 0.025417210534214973,
"learning_rate": 0.0009478827361563517,
"loss": 1.9675,
"step": 977
},
{
"epoch": 2.1494505494505494,
"grad_norm": 0.012701483443379402,
"learning_rate": 0.0009454397394136807,
"loss": 2.3404,
"step": 978
},
{
"epoch": 2.1516483516483516,
"grad_norm": 0.030810954049229622,
"learning_rate": 0.0009429967426710097,
"loss": 2.6621,
"step": 979
},
{
"epoch": 2.1538461538461537,
"grad_norm": 0.0301175769418478,
"learning_rate": 0.0009405537459283387,
"loss": 2.1158,
"step": 980
},
{
"epoch": 2.156043956043956,
"grad_norm": 0.02648276276886463,
"learning_rate": 0.0009381107491856677,
"loss": 2.7864,
"step": 981
},
{
"epoch": 2.158241758241758,
"grad_norm": 0.03547356650233269,
"learning_rate": 0.0009356677524429969,
"loss": 2.0175,
"step": 982
},
{
"epoch": 2.1604395604395603,
"grad_norm": 0.15452906489372253,
"learning_rate": 0.0009332247557003259,
"loss": 1.973,
"step": 983
},
{
"epoch": 2.1626373626373625,
"grad_norm": 0.017352167516946793,
"learning_rate": 0.0009307817589576549,
"loss": 2.0069,
"step": 984
},
{
"epoch": 2.1648351648351647,
"grad_norm": 0.32898038625717163,
"learning_rate": 0.0009283387622149839,
"loss": 2.8361,
"step": 985
},
{
"epoch": 2.167032967032967,
"grad_norm": 0.02025616355240345,
"learning_rate": 0.0009258957654723129,
"loss": 2.206,
"step": 986
},
{
"epoch": 2.169230769230769,
"grad_norm": 0.09368988871574402,
"learning_rate": 0.0009234527687296417,
"loss": 2.0939,
"step": 987
},
{
"epoch": 2.1714285714285713,
"grad_norm": 0.019261635839939117,
"learning_rate": 0.0009210097719869707,
"loss": 1.7468,
"step": 988
},
{
"epoch": 2.1736263736263735,
"grad_norm": 0.035792261362075806,
"learning_rate": 0.0009185667752442997,
"loss": 2.3156,
"step": 989
},
{
"epoch": 2.1758241758241756,
"grad_norm": 0.023065293207764626,
"learning_rate": 0.0009161237785016287,
"loss": 2.1447,
"step": 990
},
{
"epoch": 2.178021978021978,
"grad_norm": 0.19556093215942383,
"learning_rate": 0.0009136807817589577,
"loss": 2.1857,
"step": 991
},
{
"epoch": 2.18021978021978,
"grad_norm": 0.6604458093643188,
"learning_rate": 0.0009112377850162867,
"loss": 2.6722,
"step": 992
},
{
"epoch": 2.182417582417582,
"grad_norm": 0.017780927941203117,
"learning_rate": 0.0009087947882736157,
"loss": 2.1105,
"step": 993
},
{
"epoch": 2.184615384615385,
"grad_norm": 0.020711194723844528,
"learning_rate": 0.0009063517915309447,
"loss": 2.1841,
"step": 994
},
{
"epoch": 2.186813186813187,
"grad_norm": 0.27132880687713623,
"learning_rate": 0.0009039087947882737,
"loss": 2.0183,
"step": 995
},
{
"epoch": 2.1890109890109892,
"grad_norm": 0.020140541717410088,
"learning_rate": 0.0009014657980456026,
"loss": 1.8655,
"step": 996
},
{
"epoch": 2.1912087912087914,
"grad_norm": 0.02801445499062538,
"learning_rate": 0.0008990228013029316,
"loss": 2.6941,
"step": 997
},
{
"epoch": 2.1934065934065936,
"grad_norm": 0.034429922699928284,
"learning_rate": 0.0008965798045602606,
"loss": 2.3619,
"step": 998
},
{
"epoch": 2.195604395604396,
"grad_norm": 0.171352818608284,
"learning_rate": 0.0008941368078175896,
"loss": 2.6459,
"step": 999
},
{
"epoch": 2.197802197802198,
"grad_norm": 0.032546110451221466,
"learning_rate": 0.0008916938110749186,
"loss": 2.8486,
"step": 1000
},
{
"epoch": 2.2,
"grad_norm": 0.1757802665233612,
"learning_rate": 0.0008892508143322476,
"loss": 2.5169,
"step": 1001
},
{
"epoch": 2.2021978021978024,
"grad_norm": 0.03305770084261894,
"learning_rate": 0.0008868078175895766,
"loss": 2.0337,
"step": 1002
},
{
"epoch": 2.2043956043956046,
"grad_norm": 0.012092269957065582,
"learning_rate": 0.0008843648208469056,
"loss": 2.1652,
"step": 1003
},
{
"epoch": 2.2065934065934067,
"grad_norm": 0.022016610950231552,
"learning_rate": 0.0008819218241042346,
"loss": 2.3439,
"step": 1004
},
{
"epoch": 2.208791208791209,
"grad_norm": 0.025056952610611916,
"learning_rate": 0.0008794788273615636,
"loss": 2.6486,
"step": 1005
},
{
"epoch": 2.210989010989011,
"grad_norm": 0.026040343567728996,
"learning_rate": 0.0008770358306188925,
"loss": 2.0671,
"step": 1006
},
{
"epoch": 2.2131868131868133,
"grad_norm": 0.018100637942552567,
"learning_rate": 0.0008745928338762215,
"loss": 1.8159,
"step": 1007
},
{
"epoch": 2.2153846153846155,
"grad_norm": 0.07870277762413025,
"learning_rate": 0.0008721498371335505,
"loss": 2.2211,
"step": 1008
},
{
"epoch": 2.2175824175824177,
"grad_norm": 0.023609375581145287,
"learning_rate": 0.0008697068403908795,
"loss": 2.3758,
"step": 1009
},
{
"epoch": 2.21978021978022,
"grad_norm": 0.04135296121239662,
"learning_rate": 0.0008672638436482085,
"loss": 2.4836,
"step": 1010
},
{
"epoch": 2.221978021978022,
"grad_norm": 0.11649942398071289,
"learning_rate": 0.0008648208469055375,
"loss": 2.2185,
"step": 1011
},
{
"epoch": 2.2241758241758243,
"grad_norm": 0.023236358538269997,
"learning_rate": 0.0008623778501628665,
"loss": 2.3966,
"step": 1012
},
{
"epoch": 2.2263736263736265,
"grad_norm": 0.024484330788254738,
"learning_rate": 0.0008599348534201955,
"loss": 2.577,
"step": 1013
},
{
"epoch": 2.2285714285714286,
"grad_norm": 0.034407611936330795,
"learning_rate": 0.0008574918566775245,
"loss": 2.212,
"step": 1014
},
{
"epoch": 2.230769230769231,
"grad_norm": 0.08883630484342575,
"learning_rate": 0.0008550488599348534,
"loss": 2.069,
"step": 1015
},
{
"epoch": 2.232967032967033,
"grad_norm": 0.014713692478835583,
"learning_rate": 0.0008526058631921824,
"loss": 2.0397,
"step": 1016
},
{
"epoch": 2.235164835164835,
"grad_norm": 0.014511177316308022,
"learning_rate": 0.0008501628664495114,
"loss": 1.9505,
"step": 1017
},
{
"epoch": 2.2373626373626374,
"grad_norm": 0.10143587738275528,
"learning_rate": 0.0008477198697068404,
"loss": 1.8576,
"step": 1018
},
{
"epoch": 2.2395604395604396,
"grad_norm": 0.03063739649951458,
"learning_rate": 0.0008452768729641694,
"loss": 1.7271,
"step": 1019
},
{
"epoch": 2.241758241758242,
"grad_norm": 0.02525077387690544,
"learning_rate": 0.0008428338762214984,
"loss": 2.1094,
"step": 1020
},
{
"epoch": 2.243956043956044,
"grad_norm": 0.023172611370682716,
"learning_rate": 0.0008403908794788274,
"loss": 1.927,
"step": 1021
},
{
"epoch": 2.246153846153846,
"grad_norm": 0.04752547666430473,
"learning_rate": 0.0008379478827361564,
"loss": 2.1749,
"step": 1022
},
{
"epoch": 2.2483516483516484,
"grad_norm": 0.021002616733312607,
"learning_rate": 0.0008355048859934854,
"loss": 2.348,
"step": 1023
},
{
"epoch": 2.2505494505494505,
"grad_norm": 0.032305002212524414,
"learning_rate": 0.0008330618892508144,
"loss": 2.3822,
"step": 1024
},
{
"epoch": 2.2527472527472527,
"grad_norm": 0.06962234526872635,
"learning_rate": 0.0008306188925081433,
"loss": 2.1547,
"step": 1025
},
{
"epoch": 2.254945054945055,
"grad_norm": 0.021579857915639877,
"learning_rate": 0.0008281758957654723,
"loss": 1.8966,
"step": 1026
},
{
"epoch": 2.257142857142857,
"grad_norm": 0.02182813733816147,
"learning_rate": 0.0008257328990228013,
"loss": 2.1496,
"step": 1027
},
{
"epoch": 2.2593406593406593,
"grad_norm": 0.020311882719397545,
"learning_rate": 0.0008232899022801303,
"loss": 2.0434,
"step": 1028
},
{
"epoch": 2.2615384615384615,
"grad_norm": 0.06265679746866226,
"learning_rate": 0.0008208469055374593,
"loss": 2.3803,
"step": 1029
},
{
"epoch": 2.2637362637362637,
"grad_norm": 0.017879702150821686,
"learning_rate": 0.0008184039087947883,
"loss": 2.3496,
"step": 1030
},
{
"epoch": 2.265934065934066,
"grad_norm": 0.06738996505737305,
"learning_rate": 0.0008159609120521173,
"loss": 2.0807,
"step": 1031
},
{
"epoch": 2.268131868131868,
"grad_norm": 0.04000239819288254,
"learning_rate": 0.0008135179153094463,
"loss": 2.3641,
"step": 1032
},
{
"epoch": 2.2703296703296703,
"grad_norm": 0.18763373792171478,
"learning_rate": 0.0008110749185667753,
"loss": 1.7835,
"step": 1033
},
{
"epoch": 2.2725274725274724,
"grad_norm": 0.037085503339767456,
"learning_rate": 0.0008086319218241043,
"loss": 2.4118,
"step": 1034
},
{
"epoch": 2.2747252747252746,
"grad_norm": 0.020353613421320915,
"learning_rate": 0.0008061889250814332,
"loss": 2.467,
"step": 1035
},
{
"epoch": 2.276923076923077,
"grad_norm": 0.13606126606464386,
"learning_rate": 0.0008037459283387622,
"loss": 2.2033,
"step": 1036
},
{
"epoch": 2.279120879120879,
"grad_norm": 0.017082877457141876,
"learning_rate": 0.0008013029315960912,
"loss": 1.9229,
"step": 1037
},
{
"epoch": 2.281318681318681,
"grad_norm": 0.015368496999144554,
"learning_rate": 0.0007988599348534202,
"loss": 2.1967,
"step": 1038
},
{
"epoch": 2.2835164835164834,
"grad_norm": 0.03120042383670807,
"learning_rate": 0.0007964169381107492,
"loss": 2.5124,
"step": 1039
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.028278667479753494,
"learning_rate": 0.0007939739413680782,
"loss": 2.0228,
"step": 1040
},
{
"epoch": 2.2879120879120878,
"grad_norm": 0.07085943222045898,
"learning_rate": 0.0007915309446254072,
"loss": 2.2719,
"step": 1041
},
{
"epoch": 2.29010989010989,
"grad_norm": 0.019980639219284058,
"learning_rate": 0.0007890879478827362,
"loss": 1.793,
"step": 1042
},
{
"epoch": 2.292307692307692,
"grad_norm": 0.06123548001050949,
"learning_rate": 0.0007866449511400652,
"loss": 2.1984,
"step": 1043
},
{
"epoch": 2.2945054945054943,
"grad_norm": 0.018567821010947227,
"learning_rate": 0.0007842019543973941,
"loss": 2.0134,
"step": 1044
},
{
"epoch": 2.2967032967032965,
"grad_norm": 0.0491684190928936,
"learning_rate": 0.0007817589576547231,
"loss": 2.4889,
"step": 1045
},
{
"epoch": 2.2989010989010987,
"grad_norm": 0.04751253128051758,
"learning_rate": 0.0007793159609120521,
"loss": 2.3208,
"step": 1046
},
{
"epoch": 2.301098901098901,
"grad_norm": 0.03208129480481148,
"learning_rate": 0.0007768729641693811,
"loss": 2.3515,
"step": 1047
},
{
"epoch": 2.303296703296703,
"grad_norm": 0.021453483030200005,
"learning_rate": 0.0007744299674267101,
"loss": 1.7509,
"step": 1048
},
{
"epoch": 2.3054945054945053,
"grad_norm": 0.031210191547870636,
"learning_rate": 0.0007719869706840391,
"loss": 2.3176,
"step": 1049
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.029393186792731285,
"learning_rate": 0.0007695439739413681,
"loss": 2.0466,
"step": 1050
},
{
"epoch": 2.3098901098901097,
"grad_norm": 0.04653501510620117,
"learning_rate": 0.0007671009771986971,
"loss": 2.5514,
"step": 1051
},
{
"epoch": 2.312087912087912,
"grad_norm": 0.03497583046555519,
"learning_rate": 0.0007646579804560261,
"loss": 2.4589,
"step": 1052
},
{
"epoch": 2.314285714285714,
"grad_norm": 0.12523996829986572,
"learning_rate": 0.0007622149837133551,
"loss": 2.0632,
"step": 1053
},
{
"epoch": 2.3164835164835162,
"grad_norm": 0.02273179404437542,
"learning_rate": 0.000759771986970684,
"loss": 2.1711,
"step": 1054
},
{
"epoch": 2.3186813186813184,
"grad_norm": 0.016500329598784447,
"learning_rate": 0.000757328990228013,
"loss": 2.1037,
"step": 1055
},
{
"epoch": 2.320879120879121,
"grad_norm": 0.02357000857591629,
"learning_rate": 0.000754885993485342,
"loss": 1.9282,
"step": 1056
},
{
"epoch": 2.3230769230769233,
"grad_norm": 0.03277996554970741,
"learning_rate": 0.000752442996742671,
"loss": 2.0931,
"step": 1057
},
{
"epoch": 2.3252747252747255,
"grad_norm": 0.02139952778816223,
"learning_rate": 0.00075,
"loss": 2.3292,
"step": 1058
},
{
"epoch": 2.3274725274725276,
"grad_norm": 0.027038948610424995,
"learning_rate": 0.000747557003257329,
"loss": 1.6996,
"step": 1059
},
{
"epoch": 2.32967032967033,
"grad_norm": 0.02045472338795662,
"learning_rate": 0.000745114006514658,
"loss": 2.3615,
"step": 1060
},
{
"epoch": 2.331868131868132,
"grad_norm": 0.033699143677949905,
"learning_rate": 0.000742671009771987,
"loss": 2.4691,
"step": 1061
},
{
"epoch": 2.334065934065934,
"grad_norm": 0.0791887417435646,
"learning_rate": 0.000740228013029316,
"loss": 2.6602,
"step": 1062
},
{
"epoch": 2.3362637362637364,
"grad_norm": 0.019991060718894005,
"learning_rate": 0.0007377850162866449,
"loss": 2.1424,
"step": 1063
},
{
"epoch": 2.3384615384615386,
"grad_norm": 0.11026094108819962,
"learning_rate": 0.0007353420195439739,
"loss": 3.0145,
"step": 1064
},
{
"epoch": 2.340659340659341,
"grad_norm": 0.03374775871634483,
"learning_rate": 0.0007328990228013029,
"loss": 1.7981,
"step": 1065
},
{
"epoch": 2.342857142857143,
"grad_norm": 0.016445454210042953,
"learning_rate": 0.0007304560260586319,
"loss": 2.3823,
"step": 1066
},
{
"epoch": 2.345054945054945,
"grad_norm": 0.024085653945803642,
"learning_rate": 0.0007280130293159609,
"loss": 1.7073,
"step": 1067
},
{
"epoch": 2.3472527472527474,
"grad_norm": 0.01844780147075653,
"learning_rate": 0.0007255700325732899,
"loss": 2.5267,
"step": 1068
},
{
"epoch": 2.3494505494505495,
"grad_norm": 0.02193845994770527,
"learning_rate": 0.0007231270358306189,
"loss": 2.4644,
"step": 1069
},
{
"epoch": 2.3516483516483517,
"grad_norm": 0.05524541810154915,
"learning_rate": 0.0007206840390879479,
"loss": 1.8594,
"step": 1070
},
{
"epoch": 2.353846153846154,
"grad_norm": 0.019786234945058823,
"learning_rate": 0.0007182410423452769,
"loss": 2.6032,
"step": 1071
},
{
"epoch": 2.356043956043956,
"grad_norm": 0.16935895383358002,
"learning_rate": 0.0007157980456026059,
"loss": 2.0864,
"step": 1072
},
{
"epoch": 2.3582417582417583,
"grad_norm": 0.01915956288576126,
"learning_rate": 0.0007133550488599348,
"loss": 1.8782,
"step": 1073
},
{
"epoch": 2.3604395604395605,
"grad_norm": 0.09769978374242783,
"learning_rate": 0.0007109120521172638,
"loss": 1.9658,
"step": 1074
},
{
"epoch": 2.3626373626373627,
"grad_norm": 0.039156049489974976,
"learning_rate": 0.0007084690553745928,
"loss": 2.2761,
"step": 1075
},
{
"epoch": 2.364835164835165,
"grad_norm": 0.16330170631408691,
"learning_rate": 0.0007060260586319218,
"loss": 2.2775,
"step": 1076
},
{
"epoch": 2.367032967032967,
"grad_norm": 0.018183862790465355,
"learning_rate": 0.0007035830618892508,
"loss": 2.2143,
"step": 1077
},
{
"epoch": 2.3692307692307693,
"grad_norm": 0.015218627639114857,
"learning_rate": 0.0007011400651465798,
"loss": 1.6759,
"step": 1078
},
{
"epoch": 2.3714285714285714,
"grad_norm": 0.019439134746789932,
"learning_rate": 0.0006986970684039088,
"loss": 2.4219,
"step": 1079
},
{
"epoch": 2.3736263736263736,
"grad_norm": 0.025525616481900215,
"learning_rate": 0.0006962540716612378,
"loss": 2.7973,
"step": 1080
},
{
"epoch": 2.375824175824176,
"grad_norm": 0.10858825594186783,
"learning_rate": 0.0006938110749185668,
"loss": 1.8571,
"step": 1081
},
{
"epoch": 2.378021978021978,
"grad_norm": 0.1639280617237091,
"learning_rate": 0.0006913680781758957,
"loss": 3.2559,
"step": 1082
},
{
"epoch": 2.38021978021978,
"grad_norm": 0.018650470301508904,
"learning_rate": 0.0006889250814332247,
"loss": 2.2097,
"step": 1083
},
{
"epoch": 2.3824175824175824,
"grad_norm": 0.03490409255027771,
"learning_rate": 0.0006864820846905537,
"loss": 1.9935,
"step": 1084
},
{
"epoch": 2.3846153846153846,
"grad_norm": 0.04397901892662048,
"learning_rate": 0.0006840390879478827,
"loss": 1.73,
"step": 1085
},
{
"epoch": 2.3868131868131868,
"grad_norm": 0.2024208903312683,
"learning_rate": 0.0006815960912052117,
"loss": 2.0637,
"step": 1086
},
{
"epoch": 2.389010989010989,
"grad_norm": 0.02881256863474846,
"learning_rate": 0.0006791530944625407,
"loss": 2.0445,
"step": 1087
},
{
"epoch": 2.391208791208791,
"grad_norm": 0.01821977272629738,
"learning_rate": 0.0006767100977198697,
"loss": 2.4916,
"step": 1088
},
{
"epoch": 2.3934065934065933,
"grad_norm": 0.01707719825208187,
"learning_rate": 0.0006742671009771987,
"loss": 1.7255,
"step": 1089
},
{
"epoch": 2.3956043956043955,
"grad_norm": 0.04452083259820938,
"learning_rate": 0.0006718241042345277,
"loss": 2.3199,
"step": 1090
},
{
"epoch": 2.3978021978021977,
"grad_norm": 0.025469297543168068,
"learning_rate": 0.0006693811074918567,
"loss": 2.2024,
"step": 1091
},
{
"epoch": 2.4,
"grad_norm": 0.10781490802764893,
"learning_rate": 0.0006669381107491856,
"loss": 2.1945,
"step": 1092
},
{
"epoch": 2.402197802197802,
"grad_norm": 0.1381528377532959,
"learning_rate": 0.0006644951140065146,
"loss": 2.3102,
"step": 1093
},
{
"epoch": 2.4043956043956043,
"grad_norm": 0.2585447132587433,
"learning_rate": 0.0006620521172638436,
"loss": 2.0901,
"step": 1094
},
{
"epoch": 2.4065934065934065,
"grad_norm": 0.08736145496368408,
"learning_rate": 0.0006596091205211726,
"loss": 1.9559,
"step": 1095
},
{
"epoch": 2.4087912087912087,
"grad_norm": 0.02142792008817196,
"learning_rate": 0.0006571661237785016,
"loss": 2.4357,
"step": 1096
},
{
"epoch": 2.410989010989011,
"grad_norm": 0.024515246972441673,
"learning_rate": 0.0006547231270358307,
"loss": 1.8731,
"step": 1097
},
{
"epoch": 2.413186813186813,
"grad_norm": 0.05693626403808594,
"learning_rate": 0.0006522801302931597,
"loss": 2.8637,
"step": 1098
},
{
"epoch": 2.4153846153846152,
"grad_norm": 0.022110939025878906,
"learning_rate": 0.0006498371335504886,
"loss": 1.8728,
"step": 1099
},
{
"epoch": 2.4175824175824174,
"grad_norm": 0.023071501404047012,
"learning_rate": 0.0006473941368078176,
"loss": 1.9849,
"step": 1100
},
{
"epoch": 2.4197802197802196,
"grad_norm": 0.042560216039419174,
"learning_rate": 0.0006449511400651466,
"loss": 2.5147,
"step": 1101
},
{
"epoch": 2.421978021978022,
"grad_norm": 0.014156820252537727,
"learning_rate": 0.0006425081433224756,
"loss": 2.593,
"step": 1102
},
{
"epoch": 2.424175824175824,
"grad_norm": 0.23964636027812958,
"learning_rate": 0.0006400651465798046,
"loss": 2.0475,
"step": 1103
},
{
"epoch": 2.426373626373626,
"grad_norm": 0.05007048323750496,
"learning_rate": 0.0006376221498371336,
"loss": 2.4595,
"step": 1104
},
{
"epoch": 2.4285714285714284,
"grad_norm": 0.01953619159758091,
"learning_rate": 0.0006351791530944626,
"loss": 2.2627,
"step": 1105
},
{
"epoch": 2.430769230769231,
"grad_norm": 0.0204556155949831,
"learning_rate": 0.0006327361563517916,
"loss": 1.6794,
"step": 1106
},
{
"epoch": 2.432967032967033,
"grad_norm": 0.25034299492836,
"learning_rate": 0.0006302931596091206,
"loss": 2.3747,
"step": 1107
},
{
"epoch": 2.4351648351648354,
"grad_norm": 0.33934664726257324,
"learning_rate": 0.0006278501628664496,
"loss": 2.4024,
"step": 1108
},
{
"epoch": 2.4373626373626376,
"grad_norm": 0.08209378272294998,
"learning_rate": 0.0006254071661237785,
"loss": 1.9851,
"step": 1109
},
{
"epoch": 2.4395604395604398,
"grad_norm": 0.09340900927782059,
"learning_rate": 0.0006229641693811075,
"loss": 2.0049,
"step": 1110
},
{
"epoch": 2.441758241758242,
"grad_norm": 0.043822742998600006,
"learning_rate": 0.0006205211726384365,
"loss": 2.1964,
"step": 1111
},
{
"epoch": 2.443956043956044,
"grad_norm": 0.01804087497293949,
"learning_rate": 0.0006180781758957655,
"loss": 1.904,
"step": 1112
},
{
"epoch": 2.4461538461538463,
"grad_norm": 0.04729655757546425,
"learning_rate": 0.0006156351791530945,
"loss": 1.8983,
"step": 1113
},
{
"epoch": 2.4483516483516485,
"grad_norm": 0.017072033137083054,
"learning_rate": 0.0006131921824104235,
"loss": 1.8249,
"step": 1114
},
{
"epoch": 2.4505494505494507,
"grad_norm": 0.4561885893344879,
"learning_rate": 0.0006107491856677525,
"loss": 2.3848,
"step": 1115
},
{
"epoch": 2.452747252747253,
"grad_norm": 0.021676648408174515,
"learning_rate": 0.0006083061889250815,
"loss": 1.9692,
"step": 1116
},
{
"epoch": 2.454945054945055,
"grad_norm": 0.02246515080332756,
"learning_rate": 0.0006058631921824105,
"loss": 2.0598,
"step": 1117
},
{
"epoch": 2.4571428571428573,
"grad_norm": 0.03541149944067001,
"learning_rate": 0.0006034201954397394,
"loss": 2.1699,
"step": 1118
},
{
"epoch": 2.4593406593406595,
"grad_norm": 0.024743009358644485,
"learning_rate": 0.0006009771986970684,
"loss": 1.5794,
"step": 1119
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.036429256200790405,
"learning_rate": 0.0005985342019543974,
"loss": 2.7602,
"step": 1120
},
{
"epoch": 2.463736263736264,
"grad_norm": 0.020981837064027786,
"learning_rate": 0.0005960912052117264,
"loss": 1.6983,
"step": 1121
},
{
"epoch": 2.465934065934066,
"grad_norm": 0.031617872416973114,
"learning_rate": 0.0005936482084690554,
"loss": 2.5745,
"step": 1122
},
{
"epoch": 2.4681318681318682,
"grad_norm": 0.01715952716767788,
"learning_rate": 0.0005912052117263844,
"loss": 2.1793,
"step": 1123
},
{
"epoch": 2.4703296703296704,
"grad_norm": 0.02634182758629322,
"learning_rate": 0.0005887622149837134,
"loss": 2.363,
"step": 1124
},
{
"epoch": 2.4725274725274726,
"grad_norm": 0.06788533180952072,
"learning_rate": 0.0005863192182410424,
"loss": 2.0425,
"step": 1125
},
{
"epoch": 2.474725274725275,
"grad_norm": 0.16127295792102814,
"learning_rate": 0.0005838762214983714,
"loss": 2.0705,
"step": 1126
},
{
"epoch": 2.476923076923077,
"grad_norm": 0.036855120211839676,
"learning_rate": 0.0005814332247557004,
"loss": 2.7375,
"step": 1127
},
{
"epoch": 2.479120879120879,
"grad_norm": 0.08741381764411926,
"learning_rate": 0.0005789902280130293,
"loss": 2.2416,
"step": 1128
},
{
"epoch": 2.4813186813186814,
"grad_norm": 0.01814926043152809,
"learning_rate": 0.0005765472312703583,
"loss": 1.7528,
"step": 1129
},
{
"epoch": 2.4835164835164836,
"grad_norm": 0.02123294025659561,
"learning_rate": 0.0005741042345276873,
"loss": 2.6556,
"step": 1130
},
{
"epoch": 2.4857142857142858,
"grad_norm": 0.1875857561826706,
"learning_rate": 0.0005716612377850163,
"loss": 2.6161,
"step": 1131
},
{
"epoch": 2.487912087912088,
"grad_norm": 0.09761584550142288,
"learning_rate": 0.0005692182410423453,
"loss": 2.0832,
"step": 1132
},
{
"epoch": 2.49010989010989,
"grad_norm": 0.38173776865005493,
"learning_rate": 0.0005667752442996743,
"loss": 1.9017,
"step": 1133
},
{
"epoch": 2.4923076923076923,
"grad_norm": 0.021909886971116066,
"learning_rate": 0.0005643322475570033,
"loss": 2.2198,
"step": 1134
},
{
"epoch": 2.4945054945054945,
"grad_norm": 0.018010200932621956,
"learning_rate": 0.0005618892508143323,
"loss": 1.7638,
"step": 1135
},
{
"epoch": 2.4967032967032967,
"grad_norm": 0.024755030870437622,
"learning_rate": 0.0005594462540716613,
"loss": 1.8397,
"step": 1136
},
{
"epoch": 2.498901098901099,
"grad_norm": 0.019606519490480423,
"learning_rate": 0.0005570032573289903,
"loss": 1.8727,
"step": 1137
},
{
"epoch": 2.501098901098901,
"grad_norm": 0.018381360918283463,
"learning_rate": 0.0005545602605863192,
"loss": 1.941,
"step": 1138
},
{
"epoch": 2.5032967032967033,
"grad_norm": 0.0816660076379776,
"learning_rate": 0.0005521172638436482,
"loss": 2.1064,
"step": 1139
},
{
"epoch": 2.5054945054945055,
"grad_norm": 0.32827526330947876,
"learning_rate": 0.0005496742671009772,
"loss": 1.8675,
"step": 1140
},
{
"epoch": 2.5076923076923077,
"grad_norm": 0.023354550823569298,
"learning_rate": 0.0005472312703583062,
"loss": 2.1746,
"step": 1141
},
{
"epoch": 2.50989010989011,
"grad_norm": 0.015941981226205826,
"learning_rate": 0.0005447882736156352,
"loss": 2.1068,
"step": 1142
},
{
"epoch": 2.512087912087912,
"grad_norm": 0.023049544543027878,
"learning_rate": 0.0005423452768729642,
"loss": 2.2118,
"step": 1143
},
{
"epoch": 2.5142857142857142,
"grad_norm": 0.302200585603714,
"learning_rate": 0.0005399022801302932,
"loss": 2.1986,
"step": 1144
},
{
"epoch": 2.5164835164835164,
"grad_norm": 0.01780596375465393,
"learning_rate": 0.0005374592833876222,
"loss": 2.4626,
"step": 1145
},
{
"epoch": 2.5186813186813186,
"grad_norm": 0.018557853996753693,
"learning_rate": 0.0005350162866449512,
"loss": 1.666,
"step": 1146
},
{
"epoch": 2.520879120879121,
"grad_norm": 0.019323579967021942,
"learning_rate": 0.0005325732899022801,
"loss": 2.1807,
"step": 1147
},
{
"epoch": 2.523076923076923,
"grad_norm": 0.01950707472860813,
"learning_rate": 0.0005301302931596091,
"loss": 2.0463,
"step": 1148
},
{
"epoch": 2.525274725274725,
"grad_norm": 0.08034268766641617,
"learning_rate": 0.0005276872964169381,
"loss": 1.8622,
"step": 1149
},
{
"epoch": 2.5274725274725274,
"grad_norm": 0.28123190999031067,
"learning_rate": 0.0005252442996742671,
"loss": 2.4116,
"step": 1150
},
{
"epoch": 2.5296703296703296,
"grad_norm": 0.07596886903047562,
"learning_rate": 0.0005228013029315961,
"loss": 1.9389,
"step": 1151
},
{
"epoch": 2.5318681318681318,
"grad_norm": 0.02835419774055481,
"learning_rate": 0.0005203583061889251,
"loss": 1.8127,
"step": 1152
},
{
"epoch": 2.534065934065934,
"grad_norm": 0.017699172720313072,
"learning_rate": 0.0005179153094462541,
"loss": 2.3435,
"step": 1153
},
{
"epoch": 2.536263736263736,
"grad_norm": 0.015968574211001396,
"learning_rate": 0.0005154723127035831,
"loss": 2.3494,
"step": 1154
},
{
"epoch": 2.5384615384615383,
"grad_norm": 0.4923339784145355,
"learning_rate": 0.0005130293159609121,
"loss": 2.3174,
"step": 1155
},
{
"epoch": 2.5406593406593405,
"grad_norm": 0.023088594898581505,
"learning_rate": 0.0005105863192182411,
"loss": 1.8438,
"step": 1156
},
{
"epoch": 2.5428571428571427,
"grad_norm": 0.017642833292484283,
"learning_rate": 0.00050814332247557,
"loss": 1.9567,
"step": 1157
},
{
"epoch": 2.545054945054945,
"grad_norm": 0.021682027727365494,
"learning_rate": 0.000505700325732899,
"loss": 1.7914,
"step": 1158
},
{
"epoch": 2.547252747252747,
"grad_norm": 0.056267570704221725,
"learning_rate": 0.000503257328990228,
"loss": 2.0431,
"step": 1159
},
{
"epoch": 2.5494505494505493,
"grad_norm": 0.0474892221391201,
"learning_rate": 0.000500814332247557,
"loss": 2.2188,
"step": 1160
},
{
"epoch": 2.5516483516483515,
"grad_norm": 0.024913696572184563,
"learning_rate": 0.000498371335504886,
"loss": 1.7267,
"step": 1161
},
{
"epoch": 2.5538461538461537,
"grad_norm": 0.018828613683581352,
"learning_rate": 0.000495928338762215,
"loss": 1.7384,
"step": 1162
},
{
"epoch": 2.556043956043956,
"grad_norm": 0.01967066153883934,
"learning_rate": 0.000493485342019544,
"loss": 2.4271,
"step": 1163
},
{
"epoch": 2.558241758241758,
"grad_norm": 0.07820383459329605,
"learning_rate": 0.000491042345276873,
"loss": 2.6909,
"step": 1164
},
{
"epoch": 2.5604395604395602,
"grad_norm": 0.019133983179926872,
"learning_rate": 0.000488599348534202,
"loss": 2.6616,
"step": 1165
},
{
"epoch": 2.5626373626373624,
"grad_norm": 0.029354924336075783,
"learning_rate": 0.0004861563517915309,
"loss": 2.1336,
"step": 1166
},
{
"epoch": 2.5648351648351646,
"grad_norm": 0.026281390339136124,
"learning_rate": 0.0004837133550488599,
"loss": 1.8722,
"step": 1167
},
{
"epoch": 2.567032967032967,
"grad_norm": 1.1854610443115234,
"learning_rate": 0.0004812703583061889,
"loss": 2.5861,
"step": 1168
},
{
"epoch": 2.569230769230769,
"grad_norm": 0.03550174832344055,
"learning_rate": 0.00047882736156351787,
"loss": 2.3188,
"step": 1169
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.03186529874801636,
"learning_rate": 0.00047638436482084687,
"loss": 1.8674,
"step": 1170
},
{
"epoch": 2.5736263736263734,
"grad_norm": 0.05145696923136711,
"learning_rate": 0.00047394136807817587,
"loss": 2.145,
"step": 1171
},
{
"epoch": 2.5758241758241756,
"grad_norm": 1.1942601203918457,
"learning_rate": 0.00047149837133550487,
"loss": 1.8577,
"step": 1172
},
{
"epoch": 2.5780219780219777,
"grad_norm": 0.015856759622693062,
"learning_rate": 0.00046905537459283387,
"loss": 2.5326,
"step": 1173
},
{
"epoch": 2.5802197802197804,
"grad_norm": 0.01741120032966137,
"learning_rate": 0.0004666123778501629,
"loss": 2.6871,
"step": 1174
},
{
"epoch": 2.5824175824175826,
"grad_norm": 0.05078648775815964,
"learning_rate": 0.0004641693811074919,
"loss": 2.7886,
"step": 1175
},
{
"epoch": 2.5846153846153848,
"grad_norm": 0.11807391047477722,
"learning_rate": 0.00046172638436482087,
"loss": 2.0174,
"step": 1176
},
{
"epoch": 2.586813186813187,
"grad_norm": 0.022055989131331444,
"learning_rate": 0.00045928338762214987,
"loss": 2.2263,
"step": 1177
},
{
"epoch": 2.589010989010989,
"grad_norm": 0.02734105847775936,
"learning_rate": 0.0004568403908794789,
"loss": 1.6556,
"step": 1178
},
{
"epoch": 2.5912087912087913,
"grad_norm": 0.016780052334070206,
"learning_rate": 0.0004543973941368079,
"loss": 2.0143,
"step": 1179
},
{
"epoch": 2.5934065934065935,
"grad_norm": 0.020183373242616653,
"learning_rate": 0.0004519543973941369,
"loss": 1.8828,
"step": 1180
},
{
"epoch": 2.5956043956043957,
"grad_norm": 0.019724395126104355,
"learning_rate": 0.0004495114006514658,
"loss": 2.0518,
"step": 1181
},
{
"epoch": 2.597802197802198,
"grad_norm": 0.06273704767227173,
"learning_rate": 0.0004470684039087948,
"loss": 1.9788,
"step": 1182
},
{
"epoch": 2.6,
"grad_norm": 0.029615221545100212,
"learning_rate": 0.0004446254071661238,
"loss": 1.9543,
"step": 1183
},
{
"epoch": 2.6021978021978023,
"grad_norm": 0.03680985048413277,
"learning_rate": 0.0004421824104234528,
"loss": 1.7988,
"step": 1184
},
{
"epoch": 2.6043956043956045,
"grad_norm": 0.15047632157802582,
"learning_rate": 0.0004397394136807818,
"loss": 1.9736,
"step": 1185
},
{
"epoch": 2.6065934065934067,
"grad_norm": 0.02670225128531456,
"learning_rate": 0.00043729641693811077,
"loss": 2.3161,
"step": 1186
},
{
"epoch": 2.608791208791209,
"grad_norm": 0.019969085231423378,
"learning_rate": 0.00043485342019543977,
"loss": 2.1077,
"step": 1187
},
{
"epoch": 2.610989010989011,
"grad_norm": 0.0188743956387043,
"learning_rate": 0.00043241042345276877,
"loss": 2.6289,
"step": 1188
},
{
"epoch": 2.6131868131868132,
"grad_norm": 0.016851289197802544,
"learning_rate": 0.00042996742671009777,
"loss": 1.8462,
"step": 1189
},
{
"epoch": 2.6153846153846154,
"grad_norm": 0.030047070235013962,
"learning_rate": 0.0004275244299674267,
"loss": 1.9838,
"step": 1190
},
{
"epoch": 2.6175824175824176,
"grad_norm": 0.05769174173474312,
"learning_rate": 0.0004250814332247557,
"loss": 1.745,
"step": 1191
},
{
"epoch": 2.61978021978022,
"grad_norm": 0.33946049213409424,
"learning_rate": 0.0004226384364820847,
"loss": 1.8019,
"step": 1192
},
{
"epoch": 2.621978021978022,
"grad_norm": 0.03362296149134636,
"learning_rate": 0.0004201954397394137,
"loss": 1.9588,
"step": 1193
},
{
"epoch": 2.624175824175824,
"grad_norm": 0.02462005615234375,
"learning_rate": 0.0004177524429967427,
"loss": 1.7282,
"step": 1194
},
{
"epoch": 2.6263736263736264,
"grad_norm": 0.060299281030893326,
"learning_rate": 0.00041530944625407166,
"loss": 1.9364,
"step": 1195
},
{
"epoch": 2.6285714285714286,
"grad_norm": 0.03470919281244278,
"learning_rate": 0.00041286644951140066,
"loss": 1.5808,
"step": 1196
},
{
"epoch": 2.6307692307692307,
"grad_norm": 0.023478033021092415,
"learning_rate": 0.00041042345276872966,
"loss": 1.8539,
"step": 1197
},
{
"epoch": 2.632967032967033,
"grad_norm": 0.23020629584789276,
"learning_rate": 0.00040798045602605866,
"loss": 2.2458,
"step": 1198
},
{
"epoch": 2.635164835164835,
"grad_norm": 0.025353241711854935,
"learning_rate": 0.00040553745928338767,
"loss": 2.0651,
"step": 1199
},
{
"epoch": 2.6373626373626373,
"grad_norm": 0.02717735804617405,
"learning_rate": 0.0004030944625407166,
"loss": 1.8518,
"step": 1200
},
{
"epoch": 2.6395604395604395,
"grad_norm": 0.040590230375528336,
"learning_rate": 0.0004006514657980456,
"loss": 2.0325,
"step": 1201
},
{
"epoch": 2.6417582417582417,
"grad_norm": 0.896902859210968,
"learning_rate": 0.0003982084690553746,
"loss": 1.9476,
"step": 1202
},
{
"epoch": 2.643956043956044,
"grad_norm": 0.09795615077018738,
"learning_rate": 0.0003957654723127036,
"loss": 2.339,
"step": 1203
},
{
"epoch": 2.646153846153846,
"grad_norm": 0.45964691042900085,
"learning_rate": 0.0003933224755700326,
"loss": 1.8743,
"step": 1204
},
{
"epoch": 2.6483516483516483,
"grad_norm": 0.09152828902006149,
"learning_rate": 0.00039087947882736156,
"loss": 2.0666,
"step": 1205
},
{
"epoch": 2.6505494505494505,
"grad_norm": 0.07723715901374817,
"learning_rate": 0.00038843648208469056,
"loss": 2.3183,
"step": 1206
},
{
"epoch": 2.6527472527472526,
"grad_norm": 0.028351349756121635,
"learning_rate": 0.00038599348534201956,
"loss": 1.6815,
"step": 1207
},
{
"epoch": 2.654945054945055,
"grad_norm": 0.3026198744773865,
"learning_rate": 0.00038355048859934856,
"loss": 2.1881,
"step": 1208
},
{
"epoch": 2.657142857142857,
"grad_norm": 0.029981579631567,
"learning_rate": 0.00038110749185667756,
"loss": 2.5543,
"step": 1209
},
{
"epoch": 2.659340659340659,
"grad_norm": 0.04141581058502197,
"learning_rate": 0.0003786644951140065,
"loss": 2.1169,
"step": 1210
},
{
"epoch": 2.6615384615384614,
"grad_norm": 0.027997644618153572,
"learning_rate": 0.0003762214983713355,
"loss": 2.2373,
"step": 1211
},
{
"epoch": 2.6637362637362636,
"grad_norm": 0.017515940591692924,
"learning_rate": 0.0003737785016286645,
"loss": 2.2893,
"step": 1212
},
{
"epoch": 2.665934065934066,
"grad_norm": 0.020929738879203796,
"learning_rate": 0.0003713355048859935,
"loss": 1.6971,
"step": 1213
},
{
"epoch": 2.668131868131868,
"grad_norm": 0.017207855358719826,
"learning_rate": 0.00036889250814332245,
"loss": 2.0487,
"step": 1214
},
{
"epoch": 2.67032967032967,
"grad_norm": 0.021107299253344536,
"learning_rate": 0.00036644951140065146,
"loss": 2.1577,
"step": 1215
},
{
"epoch": 2.672527472527473,
"grad_norm": 0.01744648441672325,
"learning_rate": 0.00036400651465798046,
"loss": 1.6365,
"step": 1216
},
{
"epoch": 2.674725274725275,
"grad_norm": 0.03368677571415901,
"learning_rate": 0.00036156351791530946,
"loss": 2.0826,
"step": 1217
},
{
"epoch": 2.676923076923077,
"grad_norm": 0.03353323042392731,
"learning_rate": 0.00035912052117263846,
"loss": 1.9664,
"step": 1218
},
{
"epoch": 2.6791208791208794,
"grad_norm": 1.4358559846878052,
"learning_rate": 0.0003566775244299674,
"loss": 1.7192,
"step": 1219
},
{
"epoch": 2.6813186813186816,
"grad_norm": 0.03189322352409363,
"learning_rate": 0.0003542345276872964,
"loss": 2.5981,
"step": 1220
},
{
"epoch": 2.6835164835164838,
"grad_norm": 0.029954640194773674,
"learning_rate": 0.0003517915309446254,
"loss": 1.7111,
"step": 1221
},
{
"epoch": 2.685714285714286,
"grad_norm": 0.0488792285323143,
"learning_rate": 0.0003493485342019544,
"loss": 1.9695,
"step": 1222
},
{
"epoch": 2.687912087912088,
"grad_norm": 0.49267634749412537,
"learning_rate": 0.0003469055374592834,
"loss": 2.6528,
"step": 1223
},
{
"epoch": 2.6901098901098903,
"grad_norm": 0.10241641104221344,
"learning_rate": 0.00034446254071661235,
"loss": 2.4565,
"step": 1224
},
{
"epoch": 2.6923076923076925,
"grad_norm": 0.025878796353936195,
"learning_rate": 0.00034201954397394135,
"loss": 1.9824,
"step": 1225
},
{
"epoch": 2.6945054945054947,
"grad_norm": 0.026146840304136276,
"learning_rate": 0.00033957654723127035,
"loss": 1.9712,
"step": 1226
},
{
"epoch": 2.696703296703297,
"grad_norm": 0.013521072454750538,
"learning_rate": 0.00033713355048859935,
"loss": 1.519,
"step": 1227
},
{
"epoch": 2.698901098901099,
"grad_norm": 0.02498912252485752,
"learning_rate": 0.00033469055374592835,
"loss": 2.2715,
"step": 1228
},
{
"epoch": 2.7010989010989013,
"grad_norm": 0.01570369303226471,
"learning_rate": 0.0003322475570032573,
"loss": 2.6365,
"step": 1229
},
{
"epoch": 2.7032967032967035,
"grad_norm": 0.016601193696260452,
"learning_rate": 0.0003298045602605863,
"loss": 2.3376,
"step": 1230
},
{
"epoch": 2.7054945054945057,
"grad_norm": 0.022822808474302292,
"learning_rate": 0.00032736156351791535,
"loss": 2.3882,
"step": 1231
},
{
"epoch": 2.707692307692308,
"grad_norm": 0.026052678003907204,
"learning_rate": 0.0003249185667752443,
"loss": 2.6897,
"step": 1232
},
{
"epoch": 2.70989010989011,
"grad_norm": 0.026292255148291588,
"learning_rate": 0.0003224755700325733,
"loss": 2.1549,
"step": 1233
},
{
"epoch": 2.7120879120879122,
"grad_norm": 0.01564471423625946,
"learning_rate": 0.0003200325732899023,
"loss": 1.8834,
"step": 1234
},
{
"epoch": 2.7142857142857144,
"grad_norm": 0.09396910667419434,
"learning_rate": 0.0003175895765472313,
"loss": 2.2216,
"step": 1235
},
{
"epoch": 2.7164835164835166,
"grad_norm": 0.011964679695665836,
"learning_rate": 0.0003151465798045603,
"loss": 1.9747,
"step": 1236
},
{
"epoch": 2.718681318681319,
"grad_norm": 0.04576932638883591,
"learning_rate": 0.00031270358306188925,
"loss": 1.6865,
"step": 1237
},
{
"epoch": 2.720879120879121,
"grad_norm": 0.33960938453674316,
"learning_rate": 0.00031026058631921825,
"loss": 2.4435,
"step": 1238
},
{
"epoch": 2.723076923076923,
"grad_norm": 0.030767837539315224,
"learning_rate": 0.00030781758957654725,
"loss": 2.0168,
"step": 1239
},
{
"epoch": 2.7252747252747254,
"grad_norm": 0.026830973103642464,
"learning_rate": 0.00030537459283387625,
"loss": 2.0448,
"step": 1240
},
{
"epoch": 2.7274725274725276,
"grad_norm": 0.016159363090991974,
"learning_rate": 0.00030293159609120525,
"loss": 1.7546,
"step": 1241
},
{
"epoch": 2.7296703296703297,
"grad_norm": 0.01717989146709442,
"learning_rate": 0.0003004885993485342,
"loss": 1.7372,
"step": 1242
},
{
"epoch": 2.731868131868132,
"grad_norm": 0.016079340130090714,
"learning_rate": 0.0002980456026058632,
"loss": 2.0088,
"step": 1243
},
{
"epoch": 2.734065934065934,
"grad_norm": 0.4142027795314789,
"learning_rate": 0.0002956026058631922,
"loss": 1.9645,
"step": 1244
},
{
"epoch": 2.7362637362637363,
"grad_norm": 0.01705585978925228,
"learning_rate": 0.0002931596091205212,
"loss": 2.0157,
"step": 1245
},
{
"epoch": 2.7384615384615385,
"grad_norm": 0.037198908627033234,
"learning_rate": 0.0002907166123778502,
"loss": 2.271,
"step": 1246
},
{
"epoch": 2.7406593406593407,
"grad_norm": 0.023273080587387085,
"learning_rate": 0.00028827361563517914,
"loss": 2.0727,
"step": 1247
},
{
"epoch": 2.742857142857143,
"grad_norm": 0.06325782835483551,
"learning_rate": 0.00028583061889250814,
"loss": 1.9242,
"step": 1248
},
{
"epoch": 2.745054945054945,
"grad_norm": 0.023594971746206284,
"learning_rate": 0.00028338762214983714,
"loss": 2.45,
"step": 1249
},
{
"epoch": 2.7472527472527473,
"grad_norm": 1.1633131504058838,
"learning_rate": 0.00028094462540716614,
"loss": 1.7905,
"step": 1250
},
{
"epoch": 2.7494505494505495,
"grad_norm": 0.02339518442749977,
"learning_rate": 0.00027850162866449514,
"loss": 1.891,
"step": 1251
},
{
"epoch": 2.7516483516483516,
"grad_norm": 0.021431194618344307,
"learning_rate": 0.0002760586319218241,
"loss": 2.227,
"step": 1252
},
{
"epoch": 2.753846153846154,
"grad_norm": 0.14125725626945496,
"learning_rate": 0.0002736156351791531,
"loss": 2.1368,
"step": 1253
},
{
"epoch": 2.756043956043956,
"grad_norm": 0.017897093668580055,
"learning_rate": 0.0002711726384364821,
"loss": 2.5328,
"step": 1254
},
{
"epoch": 2.758241758241758,
"grad_norm": 0.017033038660883904,
"learning_rate": 0.0002687296416938111,
"loss": 2.3112,
"step": 1255
},
{
"epoch": 2.7604395604395604,
"grad_norm": 0.04279283806681633,
"learning_rate": 0.00026628664495114004,
"loss": 2.0006,
"step": 1256
},
{
"epoch": 2.7626373626373626,
"grad_norm": 0.2133704125881195,
"learning_rate": 0.00026384364820846904,
"loss": 2.1246,
"step": 1257
},
{
"epoch": 2.764835164835165,
"grad_norm": 0.023095613345503807,
"learning_rate": 0.00026140065146579804,
"loss": 1.8854,
"step": 1258
},
{
"epoch": 2.767032967032967,
"grad_norm": 0.03857843577861786,
"learning_rate": 0.00025895765472312704,
"loss": 2.4098,
"step": 1259
},
{
"epoch": 2.769230769230769,
"grad_norm": 0.025368189439177513,
"learning_rate": 0.00025651465798045604,
"loss": 2.3476,
"step": 1260
},
{
"epoch": 2.7714285714285714,
"grad_norm": 0.04637822136282921,
"learning_rate": 0.000254071661237785,
"loss": 2.0036,
"step": 1261
},
{
"epoch": 2.7736263736263735,
"grad_norm": 0.016725296154618263,
"learning_rate": 0.000251628664495114,
"loss": 1.6547,
"step": 1262
},
{
"epoch": 2.7758241758241757,
"grad_norm": 0.01870262436568737,
"learning_rate": 0.000249185667752443,
"loss": 1.9844,
"step": 1263
},
{
"epoch": 2.778021978021978,
"grad_norm": 0.018502378836274147,
"learning_rate": 0.000246742671009772,
"loss": 1.7467,
"step": 1264
},
{
"epoch": 2.78021978021978,
"grad_norm": 0.02853255532681942,
"learning_rate": 0.000244299674267101,
"loss": 2.0955,
"step": 1265
},
{
"epoch": 2.7824175824175823,
"grad_norm": 0.02429346553981304,
"learning_rate": 0.00024185667752442996,
"loss": 2.1786,
"step": 1266
},
{
"epoch": 2.7846153846153845,
"grad_norm": 0.018234720453619957,
"learning_rate": 0.00023941368078175893,
"loss": 1.824,
"step": 1267
},
{
"epoch": 2.7868131868131867,
"grad_norm": 0.016290804371237755,
"learning_rate": 0.00023697068403908794,
"loss": 2.4966,
"step": 1268
},
{
"epoch": 2.789010989010989,
"grad_norm": 0.8355116248130798,
"learning_rate": 0.00023452768729641694,
"loss": 2.9563,
"step": 1269
},
{
"epoch": 2.791208791208791,
"grad_norm": 0.026579709723591805,
"learning_rate": 0.00023208469055374596,
"loss": 2.0736,
"step": 1270
},
{
"epoch": 2.7934065934065933,
"grad_norm": 0.019901353865861893,
"learning_rate": 0.00022964169381107494,
"loss": 2.1482,
"step": 1271
},
{
"epoch": 2.7956043956043954,
"grad_norm": 0.024729322642087936,
"learning_rate": 0.00022719869706840394,
"loss": 2.4802,
"step": 1272
},
{
"epoch": 2.7978021978021976,
"grad_norm": 0.7790870666503906,
"learning_rate": 0.0002247557003257329,
"loss": 2.2293,
"step": 1273
},
{
"epoch": 2.8,
"grad_norm": 0.01835259050130844,
"learning_rate": 0.0002223127035830619,
"loss": 1.773,
"step": 1274
},
{
"epoch": 2.802197802197802,
"grad_norm": 0.3025789260864258,
"learning_rate": 0.0002198697068403909,
"loss": 2.4628,
"step": 1275
},
{
"epoch": 2.804395604395604,
"grad_norm": 0.2479473054409027,
"learning_rate": 0.00021742671009771988,
"loss": 2.0163,
"step": 1276
},
{
"epoch": 2.8065934065934064,
"grad_norm": 0.020701352506875992,
"learning_rate": 0.00021498371335504888,
"loss": 2.1362,
"step": 1277
},
{
"epoch": 2.8087912087912086,
"grad_norm": 0.04084041714668274,
"learning_rate": 0.00021254071661237786,
"loss": 2.0937,
"step": 1278
},
{
"epoch": 2.8109890109890108,
"grad_norm": 0.015458572655916214,
"learning_rate": 0.00021009771986970686,
"loss": 2.0925,
"step": 1279
},
{
"epoch": 2.813186813186813,
"grad_norm": 0.32743901014328003,
"learning_rate": 0.00020765472312703583,
"loss": 2.4536,
"step": 1280
},
{
"epoch": 2.815384615384615,
"grad_norm": 0.019182078540325165,
"learning_rate": 0.00020521172638436483,
"loss": 1.9248,
"step": 1281
},
{
"epoch": 2.8175824175824173,
"grad_norm": 0.019378086552023888,
"learning_rate": 0.00020276872964169383,
"loss": 2.1814,
"step": 1282
},
{
"epoch": 2.8197802197802195,
"grad_norm": 0.02037365920841694,
"learning_rate": 0.0002003257328990228,
"loss": 2.218,
"step": 1283
},
{
"epoch": 2.8219780219780217,
"grad_norm": 0.02443603426218033,
"learning_rate": 0.0001978827361563518,
"loss": 2.1283,
"step": 1284
},
{
"epoch": 2.824175824175824,
"grad_norm": 0.09678750485181808,
"learning_rate": 0.00019543973941368078,
"loss": 2.1176,
"step": 1285
},
{
"epoch": 2.826373626373626,
"grad_norm": 0.026256950572133064,
"learning_rate": 0.00019299674267100978,
"loss": 2.0389,
"step": 1286
},
{
"epoch": 2.8285714285714287,
"grad_norm": 0.05324495956301689,
"learning_rate": 0.00019055374592833878,
"loss": 1.954,
"step": 1287
},
{
"epoch": 2.830769230769231,
"grad_norm": 0.0423358790576458,
"learning_rate": 0.00018811074918566775,
"loss": 1.7475,
"step": 1288
},
{
"epoch": 2.832967032967033,
"grad_norm": 0.05832844227552414,
"learning_rate": 0.00018566775244299675,
"loss": 2.3601,
"step": 1289
},
{
"epoch": 2.8351648351648353,
"grad_norm": 0.019444286823272705,
"learning_rate": 0.00018322475570032573,
"loss": 1.8148,
"step": 1290
},
{
"epoch": 2.8373626373626375,
"grad_norm": 0.03657885640859604,
"learning_rate": 0.00018078175895765473,
"loss": 2.5102,
"step": 1291
},
{
"epoch": 2.8395604395604397,
"grad_norm": 0.03269704431295395,
"learning_rate": 0.0001783387622149837,
"loss": 1.7185,
"step": 1292
},
{
"epoch": 2.841758241758242,
"grad_norm": 0.022930298000574112,
"learning_rate": 0.0001758957654723127,
"loss": 2.7526,
"step": 1293
},
{
"epoch": 2.843956043956044,
"grad_norm": 0.031252775341272354,
"learning_rate": 0.0001734527687296417,
"loss": 2.5103,
"step": 1294
},
{
"epoch": 2.8461538461538463,
"grad_norm": 0.16519050300121307,
"learning_rate": 0.00017100977198697068,
"loss": 2.877,
"step": 1295
},
{
"epoch": 2.8483516483516484,
"grad_norm": 0.6687852740287781,
"learning_rate": 0.00016856677524429968,
"loss": 1.831,
"step": 1296
},
{
"epoch": 2.8505494505494506,
"grad_norm": 0.8024818301200867,
"learning_rate": 0.00016612377850162865,
"loss": 2.079,
"step": 1297
},
{
"epoch": 2.852747252747253,
"grad_norm": 0.020230107009410858,
"learning_rate": 0.00016368078175895768,
"loss": 1.912,
"step": 1298
},
{
"epoch": 2.854945054945055,
"grad_norm": 0.02708737552165985,
"learning_rate": 0.00016123778501628665,
"loss": 2.6146,
"step": 1299
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.016026277095079422,
"learning_rate": 0.00015879478827361565,
"loss": 1.507,
"step": 1300
},
{
"epoch": 2.8593406593406594,
"grad_norm": 0.04007168859243393,
"learning_rate": 0.00015635179153094462,
"loss": 2.8279,
"step": 1301
},
{
"epoch": 2.8615384615384616,
"grad_norm": 0.029475565999746323,
"learning_rate": 0.00015390879478827362,
"loss": 2.249,
"step": 1302
},
{
"epoch": 2.8637362637362638,
"grad_norm": 0.031006762757897377,
"learning_rate": 0.00015146579804560262,
"loss": 2.1212,
"step": 1303
},
{
"epoch": 2.865934065934066,
"grad_norm": 0.03035186231136322,
"learning_rate": 0.0001490228013029316,
"loss": 1.7688,
"step": 1304
},
{
"epoch": 2.868131868131868,
"grad_norm": 0.22374212741851807,
"learning_rate": 0.0001465798045602606,
"loss": 2.3701,
"step": 1305
},
{
"epoch": 2.8703296703296703,
"grad_norm": 0.015706244856119156,
"learning_rate": 0.00014413680781758957,
"loss": 2.3376,
"step": 1306
},
{
"epoch": 2.8725274725274725,
"grad_norm": 0.15711282193660736,
"learning_rate": 0.00014169381107491857,
"loss": 2.3315,
"step": 1307
},
{
"epoch": 2.8747252747252747,
"grad_norm": 0.016968049108982086,
"learning_rate": 0.00013925081433224757,
"loss": 2.5581,
"step": 1308
},
{
"epoch": 2.876923076923077,
"grad_norm": 0.025296228006482124,
"learning_rate": 0.00013680781758957655,
"loss": 2.5117,
"step": 1309
},
{
"epoch": 2.879120879120879,
"grad_norm": 0.028263038024306297,
"learning_rate": 0.00013436482084690555,
"loss": 2.2757,
"step": 1310
},
{
"epoch": 2.8813186813186813,
"grad_norm": 0.03322409838438034,
"learning_rate": 0.00013192182410423452,
"loss": 1.898,
"step": 1311
},
{
"epoch": 2.8835164835164835,
"grad_norm": 0.027632728219032288,
"learning_rate": 0.00012947882736156352,
"loss": 2.0732,
"step": 1312
},
{
"epoch": 2.8857142857142857,
"grad_norm": 0.01985703781247139,
"learning_rate": 0.0001270358306188925,
"loss": 1.809,
"step": 1313
},
{
"epoch": 2.887912087912088,
"grad_norm": 0.12070174515247345,
"learning_rate": 0.0001245928338762215,
"loss": 1.866,
"step": 1314
},
{
"epoch": 2.89010989010989,
"grad_norm": 0.020842405036091805,
"learning_rate": 0.0001221498371335505,
"loss": 1.8494,
"step": 1315
},
{
"epoch": 2.8923076923076922,
"grad_norm": 0.01726948469877243,
"learning_rate": 0.00011970684039087947,
"loss": 2.0247,
"step": 1316
},
{
"epoch": 2.8945054945054944,
"grad_norm": 0.022260282188653946,
"learning_rate": 0.00011726384364820847,
"loss": 1.6466,
"step": 1317
},
{
"epoch": 2.8967032967032966,
"grad_norm": 0.027915339916944504,
"learning_rate": 0.00011482084690553747,
"loss": 2.3843,
"step": 1318
},
{
"epoch": 2.898901098901099,
"grad_norm": 0.13879603147506714,
"learning_rate": 0.00011237785016286646,
"loss": 2.0264,
"step": 1319
},
{
"epoch": 2.901098901098901,
"grad_norm": 0.021872397512197495,
"learning_rate": 0.00010993485342019546,
"loss": 2.4769,
"step": 1320
},
{
"epoch": 2.903296703296703,
"grad_norm": 0.048628222197294235,
"learning_rate": 0.00010749185667752444,
"loss": 2.0072,
"step": 1321
},
{
"epoch": 2.9054945054945054,
"grad_norm": 0.02103383280336857,
"learning_rate": 0.00010504885993485343,
"loss": 1.9071,
"step": 1322
},
{
"epoch": 2.9076923076923076,
"grad_norm": 0.02836519666016102,
"learning_rate": 0.00010260586319218242,
"loss": 2.3599,
"step": 1323
},
{
"epoch": 2.9098901098901098,
"grad_norm": 0.19837768375873566,
"learning_rate": 0.0001001628664495114,
"loss": 1.86,
"step": 1324
},
{
"epoch": 2.912087912087912,
"grad_norm": 0.03971497714519501,
"learning_rate": 9.771986970684039e-05,
"loss": 1.9216,
"step": 1325
},
{
"epoch": 2.914285714285714,
"grad_norm": 0.01591861993074417,
"learning_rate": 9.527687296416939e-05,
"loss": 1.934,
"step": 1326
},
{
"epoch": 2.9164835164835163,
"grad_norm": 0.03482022136449814,
"learning_rate": 9.283387622149838e-05,
"loss": 2.2152,
"step": 1327
},
{
"epoch": 2.9186813186813185,
"grad_norm": 0.027939923107624054,
"learning_rate": 9.039087947882736e-05,
"loss": 1.8374,
"step": 1328
},
{
"epoch": 2.9208791208791207,
"grad_norm": 0.14090220630168915,
"learning_rate": 8.794788273615635e-05,
"loss": 2.7423,
"step": 1329
},
{
"epoch": 2.9230769230769234,
"grad_norm": 0.023117514327168465,
"learning_rate": 8.550488599348534e-05,
"loss": 2.4763,
"step": 1330
},
{
"epoch": 2.9252747252747255,
"grad_norm": 0.01546783372759819,
"learning_rate": 8.306188925081432e-05,
"loss": 1.6211,
"step": 1331
},
{
"epoch": 2.9274725274725277,
"grad_norm": 0.019776495173573494,
"learning_rate": 8.061889250814332e-05,
"loss": 2.6507,
"step": 1332
},
{
"epoch": 2.92967032967033,
"grad_norm": 0.021463599056005478,
"learning_rate": 7.817589576547231e-05,
"loss": 2.8542,
"step": 1333
},
{
"epoch": 2.931868131868132,
"grad_norm": 0.04794270917773247,
"learning_rate": 7.573289902280131e-05,
"loss": 2.1187,
"step": 1334
},
{
"epoch": 2.9340659340659343,
"grad_norm": 0.014464852400124073,
"learning_rate": 7.32899022801303e-05,
"loss": 1.9158,
"step": 1335
},
{
"epoch": 2.9362637362637365,
"grad_norm": 0.01880934089422226,
"learning_rate": 7.084690553745929e-05,
"loss": 1.6961,
"step": 1336
},
{
"epoch": 2.9384615384615387,
"grad_norm": 0.018340017646551132,
"learning_rate": 6.840390879478827e-05,
"loss": 1.6909,
"step": 1337
},
{
"epoch": 2.940659340659341,
"grad_norm": 0.06631717085838318,
"learning_rate": 6.596091205211726e-05,
"loss": 1.9832,
"step": 1338
},
{
"epoch": 2.942857142857143,
"grad_norm": 0.40452468395233154,
"learning_rate": 6.351791530944625e-05,
"loss": 2.2785,
"step": 1339
},
{
"epoch": 2.9450549450549453,
"grad_norm": 0.021004842594265938,
"learning_rate": 6.107491856677525e-05,
"loss": 2.228,
"step": 1340
},
{
"epoch": 2.9472527472527474,
"grad_norm": 0.019788943231105804,
"learning_rate": 5.8631921824104234e-05,
"loss": 2.0164,
"step": 1341
},
{
"epoch": 2.9494505494505496,
"grad_norm": 0.06920367479324341,
"learning_rate": 5.618892508143323e-05,
"loss": 2.6825,
"step": 1342
},
{
"epoch": 2.951648351648352,
"grad_norm": 0.013140171766281128,
"learning_rate": 5.374592833876222e-05,
"loss": 1.8807,
"step": 1343
},
{
"epoch": 2.953846153846154,
"grad_norm": 0.06548722088336945,
"learning_rate": 5.130293159609121e-05,
"loss": 1.9688,
"step": 1344
},
{
"epoch": 2.956043956043956,
"grad_norm": 0.06962071359157562,
"learning_rate": 4.8859934853420195e-05,
"loss": 2.2312,
"step": 1345
},
{
"epoch": 2.9582417582417584,
"grad_norm": 0.25757554173469543,
"learning_rate": 4.641693811074919e-05,
"loss": 2.3257,
"step": 1346
},
{
"epoch": 2.9604395604395606,
"grad_norm": 0.02500006929039955,
"learning_rate": 4.3973941368078175e-05,
"loss": 2.2698,
"step": 1347
},
{
"epoch": 2.9626373626373628,
"grad_norm": 0.0367441363632679,
"learning_rate": 4.153094462540716e-05,
"loss": 2.3968,
"step": 1348
},
{
"epoch": 2.964835164835165,
"grad_norm": 0.03184973821043968,
"learning_rate": 3.9087947882736156e-05,
"loss": 1.615,
"step": 1349
},
{
"epoch": 2.967032967032967,
"grad_norm": 0.015663959085941315,
"learning_rate": 3.664495114006515e-05,
"loss": 2.0719,
"step": 1350
},
{
"epoch": 2.9692307692307693,
"grad_norm": 0.03774908557534218,
"learning_rate": 3.4201954397394136e-05,
"loss": 1.9451,
"step": 1351
},
{
"epoch": 2.9714285714285715,
"grad_norm": 0.020396476611495018,
"learning_rate": 3.175895765472312e-05,
"loss": 1.7849,
"step": 1352
},
{
"epoch": 2.9736263736263737,
"grad_norm": 0.0390290729701519,
"learning_rate": 2.9315960912052117e-05,
"loss": 2.1441,
"step": 1353
},
{
"epoch": 2.975824175824176,
"grad_norm": 0.04975802078843117,
"learning_rate": 2.687296416938111e-05,
"loss": 2.2175,
"step": 1354
},
{
"epoch": 2.978021978021978,
"grad_norm": 0.021007733419537544,
"learning_rate": 2.4429967426710097e-05,
"loss": 2.3487,
"step": 1355
},
{
"epoch": 2.9802197802197803,
"grad_norm": 0.031112190335989,
"learning_rate": 2.1986970684039088e-05,
"loss": 2.1525,
"step": 1356
},
{
"epoch": 2.9824175824175825,
"grad_norm": 0.025757048279047012,
"learning_rate": 1.9543973941368078e-05,
"loss": 2.3003,
"step": 1357
},
{
"epoch": 2.9846153846153847,
"grad_norm": 0.0987224355340004,
"learning_rate": 1.7100977198697068e-05,
"loss": 1.9902,
"step": 1358
},
{
"epoch": 2.986813186813187,
"grad_norm": 0.02880852483212948,
"learning_rate": 1.4657980456026058e-05,
"loss": 1.8217,
"step": 1359
},
{
"epoch": 2.989010989010989,
"grad_norm": 0.018522989004850388,
"learning_rate": 1.2214983713355049e-05,
"loss": 1.5973,
"step": 1360
},
{
"epoch": 2.9912087912087912,
"grad_norm": 0.022518757730722427,
"learning_rate": 9.771986970684039e-06,
"loss": 1.9756,
"step": 1361
},
{
"epoch": 2.9934065934065934,
"grad_norm": 0.017544256523251534,
"learning_rate": 7.328990228013029e-06,
"loss": 1.7315,
"step": 1362
},
{
"epoch": 2.9956043956043956,
"grad_norm": 0.03184521570801735,
"learning_rate": 4.8859934853420195e-06,
"loss": 1.9758,
"step": 1363
},
{
"epoch": 2.997802197802198,
"grad_norm": 0.017147239297628403,
"learning_rate": 2.4429967426710097e-06,
"loss": 2.3604,
"step": 1364
},
{
"epoch": 3.0,
"grad_norm": 1.5269135236740112,
"learning_rate": 0.0,
"loss": 2.1262,
"step": 1365
},
{
"epoch": 3.0,
"step": 1365,
"total_flos": 1.9035319978426368e+17,
"train_loss": 2.436253024195577,
"train_runtime": 2343.5965,
"train_samples_per_second": 9.319,
"train_steps_per_second": 0.582
}
],
"logging_steps": 1.0,
"max_steps": 1365,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.9035319978426368e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}