{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4034440344403444,
"eval_steps": 21,
"global_step": 328,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0012300123001230013,
"grad_norm": 6.540346145629883,
"learning_rate": 9.803921568627452e-08,
"loss": 0.3208,
"step": 1
},
{
"epoch": 0.0024600246002460025,
"grad_norm": 5.055933475494385,
"learning_rate": 1.9607843137254904e-07,
"loss": 0.1703,
"step": 2
},
{
"epoch": 0.0036900369003690036,
"grad_norm": 6.361550331115723,
"learning_rate": 2.9411764705882356e-07,
"loss": 0.3362,
"step": 3
},
{
"epoch": 0.004920049200492005,
"grad_norm": 6.709433078765869,
"learning_rate": 3.921568627450981e-07,
"loss": 0.3346,
"step": 4
},
{
"epoch": 0.006150061500615006,
"grad_norm": 5.4415154457092285,
"learning_rate": 4.901960784313725e-07,
"loss": 0.2484,
"step": 5
},
{
"epoch": 0.007380073800738007,
"grad_norm": 5.709558010101318,
"learning_rate": 5.882352941176471e-07,
"loss": 0.2249,
"step": 6
},
{
"epoch": 0.008610086100861008,
"grad_norm": 6.553178787231445,
"learning_rate": 6.862745098039217e-07,
"loss": 0.2724,
"step": 7
},
{
"epoch": 0.00984009840098401,
"grad_norm": 5.640111446380615,
"learning_rate": 7.843137254901962e-07,
"loss": 0.251,
"step": 8
},
{
"epoch": 0.01107011070110701,
"grad_norm": 5.696380615234375,
"learning_rate": 8.823529411764707e-07,
"loss": 0.2413,
"step": 9
},
{
"epoch": 0.012300123001230012,
"grad_norm": 6.983877182006836,
"learning_rate": 9.80392156862745e-07,
"loss": 0.382,
"step": 10
},
{
"epoch": 0.013530135301353014,
"grad_norm": 6.066723346710205,
"learning_rate": 1.0784313725490197e-06,
"loss": 0.2695,
"step": 11
},
{
"epoch": 0.014760147601476014,
"grad_norm": 5.643115520477295,
"learning_rate": 1.1764705882352942e-06,
"loss": 0.2392,
"step": 12
},
{
"epoch": 0.015990159901599015,
"grad_norm": 6.062892436981201,
"learning_rate": 1.2745098039215686e-06,
"loss": 0.3603,
"step": 13
},
{
"epoch": 0.017220172201722016,
"grad_norm": 6.2491655349731445,
"learning_rate": 1.3725490196078434e-06,
"loss": 0.3282,
"step": 14
},
{
"epoch": 0.01845018450184502,
"grad_norm": 6.1164398193359375,
"learning_rate": 1.4705882352941177e-06,
"loss": 0.2878,
"step": 15
},
{
"epoch": 0.01968019680196802,
"grad_norm": 5.676611423492432,
"learning_rate": 1.5686274509803923e-06,
"loss": 0.3046,
"step": 16
},
{
"epoch": 0.020910209102091022,
"grad_norm": 7.181272983551025,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.3946,
"step": 17
},
{
"epoch": 0.02214022140221402,
"grad_norm": 5.430984020233154,
"learning_rate": 1.7647058823529414e-06,
"loss": 0.2038,
"step": 18
},
{
"epoch": 0.023370233702337023,
"grad_norm": 7.2283220291137695,
"learning_rate": 1.8627450980392158e-06,
"loss": 0.3542,
"step": 19
},
{
"epoch": 0.024600246002460024,
"grad_norm": 5.587338924407959,
"learning_rate": 1.96078431372549e-06,
"loss": 0.2369,
"step": 20
},
{
"epoch": 0.025830258302583026,
"grad_norm": 4.456090927124023,
"learning_rate": 2.058823529411765e-06,
"loss": 0.1967,
"step": 21
},
{
"epoch": 0.025830258302583026,
"eval_loss": 0.14506277441978455,
"eval_runtime": 54.872,
"eval_samples_per_second": 27.446,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8860152816653839,
"eval_sts-test_pearson_dot": 0.8766503125978379,
"eval_sts-test_pearson_euclidean": 0.9084101290541164,
"eval_sts-test_pearson_manhattan": 0.909121525028934,
"eval_sts-test_pearson_max": 0.909121525028934,
"eval_sts-test_spearman_cosine": 0.9080919696366193,
"eval_sts-test_spearman_dot": 0.8799434709726907,
"eval_sts-test_spearman_euclidean": 0.9044399981995129,
"eval_sts-test_spearman_manhattan": 0.9048055712538192,
"eval_sts-test_spearman_max": 0.9080919696366193,
"step": 21
},
{
"epoch": 0.02706027060270603,
"grad_norm": 6.088884353637695,
"learning_rate": 2.1568627450980393e-06,
"loss": 0.2368,
"step": 22
},
{
"epoch": 0.028290282902829027,
"grad_norm": 5.354013919830322,
"learning_rate": 2.254901960784314e-06,
"loss": 0.263,
"step": 23
},
{
"epoch": 0.02952029520295203,
"grad_norm": 7.822023391723633,
"learning_rate": 2.3529411764705885e-06,
"loss": 0.3595,
"step": 24
},
{
"epoch": 0.03075030750307503,
"grad_norm": 6.401333332061768,
"learning_rate": 2.450980392156863e-06,
"loss": 0.3073,
"step": 25
},
{
"epoch": 0.03198031980319803,
"grad_norm": 5.567343235015869,
"learning_rate": 2.549019607843137e-06,
"loss": 0.2232,
"step": 26
},
{
"epoch": 0.033210332103321034,
"grad_norm": 4.244979381561279,
"learning_rate": 2.647058823529412e-06,
"loss": 0.1822,
"step": 27
},
{
"epoch": 0.03444034440344403,
"grad_norm": 5.674376964569092,
"learning_rate": 2.7450980392156867e-06,
"loss": 0.251,
"step": 28
},
{
"epoch": 0.03567035670356704,
"grad_norm": 6.017494201660156,
"learning_rate": 2.843137254901961e-06,
"loss": 0.2677,
"step": 29
},
{
"epoch": 0.03690036900369004,
"grad_norm": 6.415028095245361,
"learning_rate": 2.9411764705882355e-06,
"loss": 0.3252,
"step": 30
},
{
"epoch": 0.038130381303813035,
"grad_norm": 5.484204292297363,
"learning_rate": 3.03921568627451e-06,
"loss": 0.2058,
"step": 31
},
{
"epoch": 0.03936039360393604,
"grad_norm": 5.997295379638672,
"learning_rate": 3.1372549019607846e-06,
"loss": 0.3083,
"step": 32
},
{
"epoch": 0.04059040590405904,
"grad_norm": 5.527047157287598,
"learning_rate": 3.2352941176470594e-06,
"loss": 0.2109,
"step": 33
},
{
"epoch": 0.041820418204182044,
"grad_norm": 5.817302227020264,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.2751,
"step": 34
},
{
"epoch": 0.04305043050430504,
"grad_norm": 5.476433753967285,
"learning_rate": 3.431372549019608e-06,
"loss": 0.2269,
"step": 35
},
{
"epoch": 0.04428044280442804,
"grad_norm": 5.363610744476318,
"learning_rate": 3.529411764705883e-06,
"loss": 0.2333,
"step": 36
},
{
"epoch": 0.04551045510455105,
"grad_norm": 6.07395601272583,
"learning_rate": 3.6274509803921573e-06,
"loss": 0.2747,
"step": 37
},
{
"epoch": 0.046740467404674045,
"grad_norm": 4.726163864135742,
"learning_rate": 3.7254901960784316e-06,
"loss": 0.1285,
"step": 38
},
{
"epoch": 0.04797047970479705,
"grad_norm": 5.783392906188965,
"learning_rate": 3.8235294117647055e-06,
"loss": 0.3659,
"step": 39
},
{
"epoch": 0.04920049200492005,
"grad_norm": 6.566931247711182,
"learning_rate": 3.92156862745098e-06,
"loss": 0.3991,
"step": 40
},
{
"epoch": 0.05043050430504305,
"grad_norm": 5.311452388763428,
"learning_rate": 4.019607843137255e-06,
"loss": 0.2647,
"step": 41
},
{
"epoch": 0.05166051660516605,
"grad_norm": 6.0737152099609375,
"learning_rate": 4.11764705882353e-06,
"loss": 0.3627,
"step": 42
},
{
"epoch": 0.05166051660516605,
"eval_loss": 0.1373225301504135,
"eval_runtime": 54.8187,
"eval_samples_per_second": 27.472,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8846111050777101,
"eval_sts-test_pearson_dot": 0.8747554197498655,
"eval_sts-test_pearson_euclidean": 0.9089352149126115,
"eval_sts-test_pearson_manhattan": 0.9098483550214526,
"eval_sts-test_pearson_max": 0.9098483550214526,
"eval_sts-test_spearman_cosine": 0.9084485029361248,
"eval_sts-test_spearman_dot": 0.8796038088987298,
"eval_sts-test_spearman_euclidean": 0.9055790073044468,
"eval_sts-test_spearman_manhattan": 0.9063848432683216,
"eval_sts-test_spearman_max": 0.9084485029361248,
"step": 42
},
{
"epoch": 0.05289052890528905,
"grad_norm": 4.857839584350586,
"learning_rate": 4.215686274509805e-06,
"loss": 0.2026,
"step": 43
},
{
"epoch": 0.05412054120541206,
"grad_norm": 5.248873233795166,
"learning_rate": 4.313725490196079e-06,
"loss": 0.1923,
"step": 44
},
{
"epoch": 0.055350553505535055,
"grad_norm": 5.329862117767334,
"learning_rate": 4.411764705882353e-06,
"loss": 0.2369,
"step": 45
},
{
"epoch": 0.056580565805658053,
"grad_norm": 5.581146240234375,
"learning_rate": 4.509803921568628e-06,
"loss": 0.2268,
"step": 46
},
{
"epoch": 0.05781057810578106,
"grad_norm": 5.818411350250244,
"learning_rate": 4.607843137254902e-06,
"loss": 0.2975,
"step": 47
},
{
"epoch": 0.05904059040590406,
"grad_norm": 5.096602916717529,
"learning_rate": 4.705882352941177e-06,
"loss": 0.1922,
"step": 48
},
{
"epoch": 0.06027060270602706,
"grad_norm": 5.256355285644531,
"learning_rate": 4.803921568627452e-06,
"loss": 0.1906,
"step": 49
},
{
"epoch": 0.06150061500615006,
"grad_norm": 5.3927388191223145,
"learning_rate": 4.901960784313726e-06,
"loss": 0.2379,
"step": 50
},
{
"epoch": 0.06273062730627306,
"grad_norm": 6.2723846435546875,
"learning_rate": 5e-06,
"loss": 0.3796,
"step": 51
},
{
"epoch": 0.06396063960639606,
"grad_norm": 4.595238208770752,
"learning_rate": 5.098039215686274e-06,
"loss": 0.1821,
"step": 52
},
{
"epoch": 0.06519065190651907,
"grad_norm": 4.342020511627197,
"learning_rate": 5.19607843137255e-06,
"loss": 0.1257,
"step": 53
},
{
"epoch": 0.06642066420664207,
"grad_norm": 4.998225212097168,
"learning_rate": 5.294117647058824e-06,
"loss": 0.2368,
"step": 54
},
{
"epoch": 0.06765067650676507,
"grad_norm": 5.510946273803711,
"learning_rate": 5.392156862745098e-06,
"loss": 0.294,
"step": 55
},
{
"epoch": 0.06888068880688807,
"grad_norm": 4.788788318634033,
"learning_rate": 5.4901960784313735e-06,
"loss": 0.2594,
"step": 56
},
{
"epoch": 0.07011070110701106,
"grad_norm": 5.827020645141602,
"learning_rate": 5.588235294117647e-06,
"loss": 0.2972,
"step": 57
},
{
"epoch": 0.07134071340713408,
"grad_norm": 4.821737289428711,
"learning_rate": 5.686274509803922e-06,
"loss": 0.2297,
"step": 58
},
{
"epoch": 0.07257072570725707,
"grad_norm": 4.880247592926025,
"learning_rate": 5.784313725490197e-06,
"loss": 0.1487,
"step": 59
},
{
"epoch": 0.07380073800738007,
"grad_norm": 4.447835445404053,
"learning_rate": 5.882352941176471e-06,
"loss": 0.182,
"step": 60
},
{
"epoch": 0.07503075030750307,
"grad_norm": 5.5556640625,
"learning_rate": 5.980392156862746e-06,
"loss": 0.2516,
"step": 61
},
{
"epoch": 0.07626076260762607,
"grad_norm": 5.217922687530518,
"learning_rate": 6.07843137254902e-06,
"loss": 0.2809,
"step": 62
},
{
"epoch": 0.07749077490774908,
"grad_norm": 4.436608791351318,
"learning_rate": 6.176470588235295e-06,
"loss": 0.1371,
"step": 63
},
{
"epoch": 0.07749077490774908,
"eval_loss": 0.13080179691314697,
"eval_runtime": 54.9188,
"eval_samples_per_second": 27.422,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.882074513745531,
"eval_sts-test_pearson_dot": 0.8709046425878566,
"eval_sts-test_pearson_euclidean": 0.9081794284297221,
"eval_sts-test_pearson_manhattan": 0.9093974331692458,
"eval_sts-test_pearson_max": 0.9093974331692458,
"eval_sts-test_spearman_cosine": 0.9067824582257844,
"eval_sts-test_spearman_dot": 0.8757477717096785,
"eval_sts-test_spearman_euclidean": 0.9051085820447002,
"eval_sts-test_spearman_manhattan": 0.9064308923162935,
"eval_sts-test_spearman_max": 0.9067824582257844,
"step": 63
},
{
"epoch": 0.07872078720787208,
"grad_norm": 5.6947021484375,
"learning_rate": 6.274509803921569e-06,
"loss": 0.2149,
"step": 64
},
{
"epoch": 0.07995079950799508,
"grad_norm": 4.272282600402832,
"learning_rate": 6.372549019607843e-06,
"loss": 0.1806,
"step": 65
},
{
"epoch": 0.08118081180811808,
"grad_norm": 4.575979232788086,
"learning_rate": 6.470588235294119e-06,
"loss": 0.1458,
"step": 66
},
{
"epoch": 0.08241082410824108,
"grad_norm": 4.315216541290283,
"learning_rate": 6.568627450980393e-06,
"loss": 0.249,
"step": 67
},
{
"epoch": 0.08364083640836409,
"grad_norm": 5.67277193069458,
"learning_rate": 6.666666666666667e-06,
"loss": 0.2787,
"step": 68
},
{
"epoch": 0.08487084870848709,
"grad_norm": 5.964886665344238,
"learning_rate": 6.764705882352942e-06,
"loss": 0.288,
"step": 69
},
{
"epoch": 0.08610086100861009,
"grad_norm": 4.218502521514893,
"learning_rate": 6.862745098039216e-06,
"loss": 0.1461,
"step": 70
},
{
"epoch": 0.08733087330873308,
"grad_norm": 5.179543972015381,
"learning_rate": 6.96078431372549e-06,
"loss": 0.2304,
"step": 71
},
{
"epoch": 0.08856088560885608,
"grad_norm": 5.720668792724609,
"learning_rate": 7.058823529411766e-06,
"loss": 0.3505,
"step": 72
},
{
"epoch": 0.0897908979089791,
"grad_norm": 5.2965497970581055,
"learning_rate": 7.15686274509804e-06,
"loss": 0.2227,
"step": 73
},
{
"epoch": 0.0910209102091021,
"grad_norm": 4.685606956481934,
"learning_rate": 7.2549019607843145e-06,
"loss": 0.1746,
"step": 74
},
{
"epoch": 0.09225092250922509,
"grad_norm": 4.2930145263671875,
"learning_rate": 7.352941176470589e-06,
"loss": 0.1484,
"step": 75
},
{
"epoch": 0.09348093480934809,
"grad_norm": 3.764916181564331,
"learning_rate": 7.450980392156863e-06,
"loss": 0.1346,
"step": 76
},
{
"epoch": 0.09471094710947109,
"grad_norm": 5.033151626586914,
"learning_rate": 7.549019607843138e-06,
"loss": 0.2112,
"step": 77
},
{
"epoch": 0.0959409594095941,
"grad_norm": 5.817330837249756,
"learning_rate": 7.647058823529411e-06,
"loss": 0.3138,
"step": 78
},
{
"epoch": 0.0971709717097171,
"grad_norm": 6.147035121917725,
"learning_rate": 7.745098039215687e-06,
"loss": 0.2675,
"step": 79
},
{
"epoch": 0.0984009840098401,
"grad_norm": 5.131881237030029,
"learning_rate": 7.84313725490196e-06,
"loss": 0.2849,
"step": 80
},
{
"epoch": 0.0996309963099631,
"grad_norm": 4.2269368171691895,
"learning_rate": 7.941176470588236e-06,
"loss": 0.1719,
"step": 81
},
{
"epoch": 0.1008610086100861,
"grad_norm": 5.200590133666992,
"learning_rate": 8.03921568627451e-06,
"loss": 0.2749,
"step": 82
},
{
"epoch": 0.10209102091020911,
"grad_norm": 5.44044303894043,
"learning_rate": 8.137254901960784e-06,
"loss": 0.3097,
"step": 83
},
{
"epoch": 0.1033210332103321,
"grad_norm": 4.603049278259277,
"learning_rate": 8.23529411764706e-06,
"loss": 0.2068,
"step": 84
},
{
"epoch": 0.1033210332103321,
"eval_loss": 0.1260141134262085,
"eval_runtime": 54.8932,
"eval_samples_per_second": 27.435,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8775839612260851,
"eval_sts-test_pearson_dot": 0.8664914414909934,
"eval_sts-test_pearson_euclidean": 0.9054210798291935,
"eval_sts-test_pearson_manhattan": 0.9069843565115414,
"eval_sts-test_pearson_max": 0.9069843565115414,
"eval_sts-test_spearman_cosine": 0.9044597335057865,
"eval_sts-test_spearman_dot": 0.872940077569982,
"eval_sts-test_spearman_euclidean": 0.9027100934391671,
"eval_sts-test_spearman_manhattan": 0.904476380975024,
"eval_sts-test_spearman_max": 0.904476380975024,
"step": 84
},
{
"epoch": 0.1045510455104551,
"grad_norm": 4.813210964202881,
"learning_rate": 8.333333333333334e-06,
"loss": 0.22,
"step": 85
},
{
"epoch": 0.1057810578105781,
"grad_norm": 4.659386157989502,
"learning_rate": 8.43137254901961e-06,
"loss": 0.2977,
"step": 86
},
{
"epoch": 0.1070110701107011,
"grad_norm": 4.895315647125244,
"learning_rate": 8.529411764705883e-06,
"loss": 0.209,
"step": 87
},
{
"epoch": 0.10824108241082411,
"grad_norm": 5.339110851287842,
"learning_rate": 8.627450980392157e-06,
"loss": 0.2215,
"step": 88
},
{
"epoch": 0.10947109471094711,
"grad_norm": 4.615406036376953,
"learning_rate": 8.725490196078433e-06,
"loss": 0.1948,
"step": 89
},
{
"epoch": 0.11070110701107011,
"grad_norm": 5.0383734703063965,
"learning_rate": 8.823529411764707e-06,
"loss": 0.2084,
"step": 90
},
{
"epoch": 0.11193111931119311,
"grad_norm": 3.9511592388153076,
"learning_rate": 8.921568627450982e-06,
"loss": 0.1823,
"step": 91
},
{
"epoch": 0.11316113161131611,
"grad_norm": 5.13690710067749,
"learning_rate": 9.019607843137256e-06,
"loss": 0.255,
"step": 92
},
{
"epoch": 0.11439114391143912,
"grad_norm": 5.1460747718811035,
"learning_rate": 9.11764705882353e-06,
"loss": 0.2675,
"step": 93
},
{
"epoch": 0.11562115621156212,
"grad_norm": 4.207213878631592,
"learning_rate": 9.215686274509804e-06,
"loss": 0.18,
"step": 94
},
{
"epoch": 0.11685116851168512,
"grad_norm": 4.802348613739014,
"learning_rate": 9.31372549019608e-06,
"loss": 0.2891,
"step": 95
},
{
"epoch": 0.11808118081180811,
"grad_norm": 4.9332966804504395,
"learning_rate": 9.411764705882354e-06,
"loss": 0.253,
"step": 96
},
{
"epoch": 0.11931119311193111,
"grad_norm": 5.841371536254883,
"learning_rate": 9.509803921568628e-06,
"loss": 0.3481,
"step": 97
},
{
"epoch": 0.12054120541205413,
"grad_norm": 3.70485782623291,
"learning_rate": 9.607843137254903e-06,
"loss": 0.1688,
"step": 98
},
{
"epoch": 0.12177121771217712,
"grad_norm": 4.415471076965332,
"learning_rate": 9.705882352941177e-06,
"loss": 0.1808,
"step": 99
},
{
"epoch": 0.12300123001230012,
"grad_norm": 5.058602809906006,
"learning_rate": 9.803921568627451e-06,
"loss": 0.2821,
"step": 100
},
{
"epoch": 0.12423124231242312,
"grad_norm": 4.303729057312012,
"learning_rate": 9.901960784313727e-06,
"loss": 0.1856,
"step": 101
},
{
"epoch": 0.12546125461254612,
"grad_norm": 4.048065185546875,
"learning_rate": 1e-05,
"loss": 0.1441,
"step": 102
},
{
"epoch": 0.12669126691266913,
"grad_norm": 4.463968753814697,
"learning_rate": 1.0098039215686275e-05,
"loss": 0.226,
"step": 103
},
{
"epoch": 0.12792127921279212,
"grad_norm": 3.401120901107788,
"learning_rate": 1.0196078431372549e-05,
"loss": 0.1662,
"step": 104
},
{
"epoch": 0.12915129151291513,
"grad_norm": 4.119345188140869,
"learning_rate": 1.0294117647058823e-05,
"loss": 0.2043,
"step": 105
},
{
"epoch": 0.12915129151291513,
"eval_loss": 0.11874283850193024,
"eval_runtime": 54.7282,
"eval_samples_per_second": 27.518,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8767520821963045,
"eval_sts-test_pearson_dot": 0.8648481444888331,
"eval_sts-test_pearson_euclidean": 0.9053937497921556,
"eval_sts-test_pearson_manhattan": 0.9071737646452815,
"eval_sts-test_pearson_max": 0.9071737646452815,
"eval_sts-test_spearman_cosine": 0.9050983787571032,
"eval_sts-test_spearman_dot": 0.8730474805973213,
"eval_sts-test_spearman_euclidean": 0.9035385735413058,
"eval_sts-test_spearman_manhattan": 0.9054231834122819,
"eval_sts-test_spearman_max": 0.9054231834122819,
"step": 105
},
{
"epoch": 0.13038130381303814,
"grad_norm": 5.826413154602051,
"learning_rate": 1.03921568627451e-05,
"loss": 0.3907,
"step": 106
},
{
"epoch": 0.13161131611316113,
"grad_norm": 3.2629737854003906,
"learning_rate": 1.0490196078431374e-05,
"loss": 0.1332,
"step": 107
},
{
"epoch": 0.13284132841328414,
"grad_norm": 4.044755458831787,
"learning_rate": 1.0588235294117648e-05,
"loss": 0.2243,
"step": 108
},
{
"epoch": 0.13407134071340712,
"grad_norm": 3.9784040451049805,
"learning_rate": 1.0686274509803922e-05,
"loss": 0.162,
"step": 109
},
{
"epoch": 0.13530135301353013,
"grad_norm": 3.1851444244384766,
"learning_rate": 1.0784313725490196e-05,
"loss": 0.1481,
"step": 110
},
{
"epoch": 0.13653136531365315,
"grad_norm": 4.281413555145264,
"learning_rate": 1.0882352941176471e-05,
"loss": 0.2163,
"step": 111
},
{
"epoch": 0.13776137761377613,
"grad_norm": 4.62849235534668,
"learning_rate": 1.0980392156862747e-05,
"loss": 0.24,
"step": 112
},
{
"epoch": 0.13899138991389914,
"grad_norm": 3.92616868019104,
"learning_rate": 1.1078431372549021e-05,
"loss": 0.1406,
"step": 113
},
{
"epoch": 0.14022140221402213,
"grad_norm": 3.8505780696868896,
"learning_rate": 1.1176470588235295e-05,
"loss": 0.1522,
"step": 114
},
{
"epoch": 0.14145141451414514,
"grad_norm": 5.220509052276611,
"learning_rate": 1.1274509803921569e-05,
"loss": 0.2593,
"step": 115
},
{
"epoch": 0.14268142681426815,
"grad_norm": 4.459743499755859,
"learning_rate": 1.1372549019607844e-05,
"loss": 0.2426,
"step": 116
},
{
"epoch": 0.14391143911439114,
"grad_norm": 4.434360504150391,
"learning_rate": 1.1470588235294118e-05,
"loss": 0.1781,
"step": 117
},
{
"epoch": 0.14514145141451415,
"grad_norm": 4.638584613800049,
"learning_rate": 1.1568627450980394e-05,
"loss": 0.264,
"step": 118
},
{
"epoch": 0.14637146371463713,
"grad_norm": 4.5364484786987305,
"learning_rate": 1.1666666666666668e-05,
"loss": 0.1944,
"step": 119
},
{
"epoch": 0.14760147601476015,
"grad_norm": 3.597980499267578,
"learning_rate": 1.1764705882352942e-05,
"loss": 0.1341,
"step": 120
},
{
"epoch": 0.14883148831488316,
"grad_norm": 3.5174648761749268,
"learning_rate": 1.1862745098039217e-05,
"loss": 0.155,
"step": 121
},
{
"epoch": 0.15006150061500614,
"grad_norm": 4.771029949188232,
"learning_rate": 1.1960784313725491e-05,
"loss": 0.2052,
"step": 122
},
{
"epoch": 0.15129151291512916,
"grad_norm": 4.15376615524292,
"learning_rate": 1.2058823529411765e-05,
"loss": 0.2023,
"step": 123
},
{
"epoch": 0.15252152521525214,
"grad_norm": 3.5796732902526855,
"learning_rate": 1.215686274509804e-05,
"loss": 0.1519,
"step": 124
},
{
"epoch": 0.15375153751537515,
"grad_norm": 3.759777545928955,
"learning_rate": 1.2254901960784315e-05,
"loss": 0.2118,
"step": 125
},
{
"epoch": 0.15498154981549817,
"grad_norm": 4.691242218017578,
"learning_rate": 1.235294117647059e-05,
"loss": 0.2489,
"step": 126
},
{
"epoch": 0.15498154981549817,
"eval_loss": 0.11467884480953217,
"eval_runtime": 54.6969,
"eval_samples_per_second": 27.534,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8783763084873629,
"eval_sts-test_pearson_dot": 0.8674040012483692,
"eval_sts-test_pearson_euclidean": 0.9069725735634968,
"eval_sts-test_pearson_manhattan": 0.908783443457056,
"eval_sts-test_pearson_max": 0.908783443457056,
"eval_sts-test_spearman_cosine": 0.9058364613112314,
"eval_sts-test_spearman_dot": 0.8738751104254939,
"eval_sts-test_spearman_euclidean": 0.904588080123457,
"eval_sts-test_spearman_manhattan": 0.9067583820471556,
"eval_sts-test_spearman_max": 0.9067583820471556,
"step": 126
},
{
"epoch": 0.15621156211562115,
"grad_norm": 4.087611198425293,
"learning_rate": 1.2450980392156864e-05,
"loss": 0.1988,
"step": 127
},
{
"epoch": 0.15744157441574416,
"grad_norm": 3.754612684249878,
"learning_rate": 1.2549019607843138e-05,
"loss": 0.1541,
"step": 128
},
{
"epoch": 0.15867158671586715,
"grad_norm": 3.9258835315704346,
"learning_rate": 1.2647058823529412e-05,
"loss": 0.1819,
"step": 129
},
{
"epoch": 0.15990159901599016,
"grad_norm": 3.88478422164917,
"learning_rate": 1.2745098039215686e-05,
"loss": 0.1582,
"step": 130
},
{
"epoch": 0.16113161131611317,
"grad_norm": 4.9845428466796875,
"learning_rate": 1.2843137254901964e-05,
"loss": 0.2866,
"step": 131
},
{
"epoch": 0.16236162361623616,
"grad_norm": 4.692960262298584,
"learning_rate": 1.2941176470588238e-05,
"loss": 0.2766,
"step": 132
},
{
"epoch": 0.16359163591635917,
"grad_norm": 3.9432125091552734,
"learning_rate": 1.3039215686274511e-05,
"loss": 0.1299,
"step": 133
},
{
"epoch": 0.16482164821648215,
"grad_norm": 4.439709663391113,
"learning_rate": 1.3137254901960785e-05,
"loss": 0.2558,
"step": 134
},
{
"epoch": 0.16605166051660517,
"grad_norm": 3.631169319152832,
"learning_rate": 1.323529411764706e-05,
"loss": 0.1687,
"step": 135
},
{
"epoch": 0.16728167281672818,
"grad_norm": 4.130221843719482,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.173,
"step": 136
},
{
"epoch": 0.16851168511685116,
"grad_norm": 4.169937610626221,
"learning_rate": 1.3431372549019607e-05,
"loss": 0.2276,
"step": 137
},
{
"epoch": 0.16974169741697417,
"grad_norm": 4.4349751472473145,
"learning_rate": 1.3529411764705885e-05,
"loss": 0.2174,
"step": 138
},
{
"epoch": 0.17097170971709716,
"grad_norm": 4.688521862030029,
"learning_rate": 1.3627450980392158e-05,
"loss": 0.2666,
"step": 139
},
{
"epoch": 0.17220172201722017,
"grad_norm": 3.7199971675872803,
"learning_rate": 1.3725490196078432e-05,
"loss": 0.1524,
"step": 140
},
{
"epoch": 0.17343173431734318,
"grad_norm": 2.8609495162963867,
"learning_rate": 1.3823529411764706e-05,
"loss": 0.1179,
"step": 141
},
{
"epoch": 0.17466174661746617,
"grad_norm": 4.374091625213623,
"learning_rate": 1.392156862745098e-05,
"loss": 0.2475,
"step": 142
},
{
"epoch": 0.17589175891758918,
"grad_norm": 5.200084209442139,
"learning_rate": 1.4019607843137256e-05,
"loss": 0.2662,
"step": 143
},
{
"epoch": 0.17712177121771217,
"grad_norm": 3.720994710922241,
"learning_rate": 1.4117647058823532e-05,
"loss": 0.1596,
"step": 144
},
{
"epoch": 0.17835178351783518,
"grad_norm": 3.991046905517578,
"learning_rate": 1.4215686274509805e-05,
"loss": 0.2331,
"step": 145
},
{
"epoch": 0.1795817958179582,
"grad_norm": 4.76691198348999,
"learning_rate": 1.431372549019608e-05,
"loss": 0.2905,
"step": 146
},
{
"epoch": 0.18081180811808117,
"grad_norm": 3.6453163623809814,
"learning_rate": 1.4411764705882353e-05,
"loss": 0.1342,
"step": 147
},
{
"epoch": 0.18081180811808117,
"eval_loss": 0.10875426232814789,
"eval_runtime": 54.7153,
"eval_samples_per_second": 27.524,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8768801472189502,
"eval_sts-test_pearson_dot": 0.8620776961156391,
"eval_sts-test_pearson_euclidean": 0.9073367408863471,
"eval_sts-test_pearson_manhattan": 0.9086519830687241,
"eval_sts-test_pearson_max": 0.9086519830687241,
"eval_sts-test_spearman_cosine": 0.905147068129497,
"eval_sts-test_spearman_dot": 0.869011811845045,
"eval_sts-test_spearman_euclidean": 0.9050077574527855,
"eval_sts-test_spearman_manhattan": 0.9068115017944273,
"eval_sts-test_spearman_max": 0.9068115017944273,
"step": 147
},
{
"epoch": 0.1820418204182042,
"grad_norm": 2.6028358936309814,
"learning_rate": 1.4509803921568629e-05,
"loss": 0.0839,
"step": 148
},
{
"epoch": 0.18327183271832717,
"grad_norm": 4.445943832397461,
"learning_rate": 1.4607843137254903e-05,
"loss": 0.2055,
"step": 149
},
{
"epoch": 0.18450184501845018,
"grad_norm": 4.500098705291748,
"learning_rate": 1.4705882352941179e-05,
"loss": 0.2196,
"step": 150
},
{
"epoch": 0.1857318573185732,
"grad_norm": 4.317416667938232,
"learning_rate": 1.4803921568627453e-05,
"loss": 0.2283,
"step": 151
},
{
"epoch": 0.18696186961869618,
"grad_norm": 4.395689010620117,
"learning_rate": 1.4901960784313726e-05,
"loss": 0.2105,
"step": 152
},
{
"epoch": 0.1881918819188192,
"grad_norm": 3.5757391452789307,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.1534,
"step": 153
},
{
"epoch": 0.18942189421894218,
"grad_norm": 3.860861301422119,
"learning_rate": 1.5098039215686276e-05,
"loss": 0.1954,
"step": 154
},
{
"epoch": 0.1906519065190652,
"grad_norm": 3.4191622734069824,
"learning_rate": 1.519607843137255e-05,
"loss": 0.1332,
"step": 155
},
{
"epoch": 0.1918819188191882,
"grad_norm": 3.8505654335021973,
"learning_rate": 1.5294117647058822e-05,
"loss": 0.19,
"step": 156
},
{
"epoch": 0.1931119311193112,
"grad_norm": 4.127209663391113,
"learning_rate": 1.53921568627451e-05,
"loss": 0.1878,
"step": 157
},
{
"epoch": 0.1943419434194342,
"grad_norm": 3.7976646423339844,
"learning_rate": 1.5490196078431373e-05,
"loss": 0.1518,
"step": 158
},
{
"epoch": 0.19557195571955718,
"grad_norm": 4.613111972808838,
"learning_rate": 1.558823529411765e-05,
"loss": 0.1906,
"step": 159
},
{
"epoch": 0.1968019680196802,
"grad_norm": 3.911393880844116,
"learning_rate": 1.568627450980392e-05,
"loss": 0.155,
"step": 160
},
{
"epoch": 0.1980319803198032,
"grad_norm": 3.694939374923706,
"learning_rate": 1.5784313725490197e-05,
"loss": 0.1519,
"step": 161
},
{
"epoch": 0.1992619926199262,
"grad_norm": 4.334694862365723,
"learning_rate": 1.5882352941176473e-05,
"loss": 0.1726,
"step": 162
},
{
"epoch": 0.2004920049200492,
"grad_norm": 3.6630055904388428,
"learning_rate": 1.5980392156862748e-05,
"loss": 0.1618,
"step": 163
},
{
"epoch": 0.2017220172201722,
"grad_norm": 4.7789130210876465,
"learning_rate": 1.607843137254902e-05,
"loss": 0.2767,
"step": 164
},
{
"epoch": 0.2029520295202952,
"grad_norm": 4.171343803405762,
"learning_rate": 1.6176470588235296e-05,
"loss": 0.1996,
"step": 165
},
{
"epoch": 0.20418204182041821,
"grad_norm": 4.386513710021973,
"learning_rate": 1.627450980392157e-05,
"loss": 0.1907,
"step": 166
},
{
"epoch": 0.2054120541205412,
"grad_norm": 4.183532238006592,
"learning_rate": 1.6372549019607844e-05,
"loss": 0.1928,
"step": 167
},
{
"epoch": 0.2066420664206642,
"grad_norm": 3.8950257301330566,
"learning_rate": 1.647058823529412e-05,
"loss": 0.1507,
"step": 168
},
{
"epoch": 0.2066420664206642,
"eval_loss": 0.10821738839149475,
"eval_runtime": 54.7389,
"eval_samples_per_second": 27.512,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8772991124680096,
"eval_sts-test_pearson_dot": 0.861322579093208,
"eval_sts-test_pearson_euclidean": 0.9072621517681675,
"eval_sts-test_pearson_manhattan": 0.9086600802981594,
"eval_sts-test_pearson_max": 0.9086600802981594,
"eval_sts-test_spearman_cosine": 0.9044609865411055,
"eval_sts-test_spearman_dot": 0.8661539962925903,
"eval_sts-test_spearman_euclidean": 0.904084091417667,
"eval_sts-test_spearman_manhattan": 0.9054917423447336,
"eval_sts-test_spearman_max": 0.9054917423447336,
"step": 168
},
{
"epoch": 0.2078720787207872,
"grad_norm": 4.002283096313477,
"learning_rate": 1.6568627450980395e-05,
"loss": 0.1637,
"step": 169
},
{
"epoch": 0.2091020910209102,
"grad_norm": 4.142872333526611,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.1687,
"step": 170
},
{
"epoch": 0.21033210332103322,
"grad_norm": 4.345719337463379,
"learning_rate": 1.6764705882352943e-05,
"loss": 0.2181,
"step": 171
},
{
"epoch": 0.2115621156211562,
"grad_norm": 3.7364888191223145,
"learning_rate": 1.686274509803922e-05,
"loss": 0.1496,
"step": 172
},
{
"epoch": 0.21279212792127922,
"grad_norm": 4.202157974243164,
"learning_rate": 1.696078431372549e-05,
"loss": 0.1749,
"step": 173
},
{
"epoch": 0.2140221402214022,
"grad_norm": 4.639451503753662,
"learning_rate": 1.7058823529411767e-05,
"loss": 0.2374,
"step": 174
},
{
"epoch": 0.21525215252152521,
"grad_norm": 4.011781215667725,
"learning_rate": 1.715686274509804e-05,
"loss": 0.2122,
"step": 175
},
{
"epoch": 0.21648216482164823,
"grad_norm": 4.113095760345459,
"learning_rate": 1.7254901960784314e-05,
"loss": 0.1617,
"step": 176
},
{
"epoch": 0.2177121771217712,
"grad_norm": 4.0442681312561035,
"learning_rate": 1.735294117647059e-05,
"loss": 0.168,
"step": 177
},
{
"epoch": 0.21894218942189422,
"grad_norm": 4.375425338745117,
"learning_rate": 1.7450980392156866e-05,
"loss": 0.263,
"step": 178
},
{
"epoch": 0.2201722017220172,
"grad_norm": 3.2303390502929688,
"learning_rate": 1.7549019607843138e-05,
"loss": 0.1328,
"step": 179
},
{
"epoch": 0.22140221402214022,
"grad_norm": 4.832092761993408,
"learning_rate": 1.7647058823529414e-05,
"loss": 0.3157,
"step": 180
},
{
"epoch": 0.22263222632226323,
"grad_norm": 3.57254695892334,
"learning_rate": 1.7745098039215686e-05,
"loss": 0.2164,
"step": 181
},
{
"epoch": 0.22386223862238622,
"grad_norm": 3.135535717010498,
"learning_rate": 1.7843137254901965e-05,
"loss": 0.1255,
"step": 182
},
{
"epoch": 0.22509225092250923,
"grad_norm": 4.719324588775635,
"learning_rate": 1.7941176470588237e-05,
"loss": 0.2863,
"step": 183
},
{
"epoch": 0.22632226322263221,
"grad_norm": 3.8961801528930664,
"learning_rate": 1.8039215686274513e-05,
"loss": 0.155,
"step": 184
},
{
"epoch": 0.22755227552275523,
"grad_norm": 2.8389103412628174,
"learning_rate": 1.8137254901960785e-05,
"loss": 0.1271,
"step": 185
},
{
"epoch": 0.22878228782287824,
"grad_norm": 4.103536128997803,
"learning_rate": 1.823529411764706e-05,
"loss": 0.216,
"step": 186
},
{
"epoch": 0.23001230012300122,
"grad_norm": 4.006705284118652,
"learning_rate": 1.8333333333333333e-05,
"loss": 0.205,
"step": 187
},
{
"epoch": 0.23124231242312424,
"grad_norm": 3.424255847930908,
"learning_rate": 1.843137254901961e-05,
"loss": 0.1575,
"step": 188
},
{
"epoch": 0.23247232472324722,
"grad_norm": 4.568851947784424,
"learning_rate": 1.8529411764705884e-05,
"loss": 0.1939,
"step": 189
},
{
"epoch": 0.23247232472324722,
"eval_loss": 0.1056687980890274,
"eval_runtime": 54.7136,
"eval_samples_per_second": 27.525,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8789160692756717,
"eval_sts-test_pearson_dot": 0.8639029174125306,
"eval_sts-test_pearson_euclidean": 0.9084173029414142,
"eval_sts-test_pearson_manhattan": 0.9093131544369648,
"eval_sts-test_pearson_max": 0.9093131544369648,
"eval_sts-test_spearman_cosine": 0.904571298400435,
"eval_sts-test_spearman_dot": 0.8658778810098052,
"eval_sts-test_spearman_euclidean": 0.9046812074984125,
"eval_sts-test_spearman_manhattan": 0.9056302027474785,
"eval_sts-test_spearman_max": 0.9056302027474785,
"step": 189
},
{
"epoch": 0.23370233702337023,
"grad_norm": 3.5909903049468994,
"learning_rate": 1.862745098039216e-05,
"loss": 0.2209,
"step": 190
},
{
"epoch": 0.23493234932349324,
"grad_norm": 3.443946361541748,
"learning_rate": 1.8725490196078432e-05,
"loss": 0.153,
"step": 191
},
{
"epoch": 0.23616236162361623,
"grad_norm": 3.8604445457458496,
"learning_rate": 1.8823529411764708e-05,
"loss": 0.2187,
"step": 192
},
{
"epoch": 0.23739237392373924,
"grad_norm": 3.5916690826416016,
"learning_rate": 1.892156862745098e-05,
"loss": 0.1593,
"step": 193
},
{
"epoch": 0.23862238622386223,
"grad_norm": 3.8676974773406982,
"learning_rate": 1.9019607843137255e-05,
"loss": 0.173,
"step": 194
},
{
"epoch": 0.23985239852398524,
"grad_norm": 4.338643550872803,
"learning_rate": 1.911764705882353e-05,
"loss": 0.2377,
"step": 195
},
{
"epoch": 0.24108241082410825,
"grad_norm": 4.509932994842529,
"learning_rate": 1.9215686274509807e-05,
"loss": 0.2281,
"step": 196
},
{
"epoch": 0.24231242312423124,
"grad_norm": 4.282917022705078,
"learning_rate": 1.931372549019608e-05,
"loss": 0.2651,
"step": 197
},
{
"epoch": 0.24354243542435425,
"grad_norm": 3.1566977500915527,
"learning_rate": 1.9411764705882355e-05,
"loss": 0.118,
"step": 198
},
{
"epoch": 0.24477244772447723,
"grad_norm": 4.118341445922852,
"learning_rate": 1.950980392156863e-05,
"loss": 0.1728,
"step": 199
},
{
"epoch": 0.24600246002460024,
"grad_norm": 4.250949859619141,
"learning_rate": 1.9607843137254903e-05,
"loss": 0.2299,
"step": 200
},
{
"epoch": 0.24723247232472326,
"grad_norm": 4.084754943847656,
"learning_rate": 1.9705882352941178e-05,
"loss": 0.2342,
"step": 201
},
{
"epoch": 0.24846248462484624,
"grad_norm": 3.939434051513672,
"learning_rate": 1.9803921568627454e-05,
"loss": 0.2413,
"step": 202
},
{
"epoch": 0.24969249692496925,
"grad_norm": 3.9612276554107666,
"learning_rate": 1.9901960784313726e-05,
"loss": 0.168,
"step": 203
},
{
"epoch": 0.25092250922509224,
"grad_norm": 3.401622772216797,
"learning_rate": 2e-05,
"loss": 0.1474,
"step": 204
},
{
"epoch": 0.2521525215252153,
"grad_norm": 3.2245850563049316,
"learning_rate": 1.9998802517966852e-05,
"loss": 0.1102,
"step": 205
},
{
"epoch": 0.25338253382533826,
"grad_norm": 4.254729270935059,
"learning_rate": 1.9995210358660037e-05,
"loss": 0.2326,
"step": 206
},
{
"epoch": 0.25461254612546125,
"grad_norm": 3.603159189224243,
"learning_rate": 1.9989224382388813e-05,
"loss": 0.1787,
"step": 207
},
{
"epoch": 0.25584255842558423,
"grad_norm": 3.434582471847534,
"learning_rate": 1.9980846022772978e-05,
"loss": 0.1423,
"step": 208
},
{
"epoch": 0.2570725707257073,
"grad_norm": 3.8560950756073,
"learning_rate": 1.997007728639956e-05,
"loss": 0.2069,
"step": 209
},
{
"epoch": 0.25830258302583026,
"grad_norm": 3.4417314529418945,
"learning_rate": 1.9956920752342226e-05,
"loss": 0.136,
"step": 210
},
{
"epoch": 0.25830258302583026,
"eval_loss": 0.10401736944913864,
"eval_runtime": 54.8034,
"eval_samples_per_second": 27.48,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8792920980078169,
"eval_sts-test_pearson_dot": 0.8612401830255582,
"eval_sts-test_pearson_euclidean": 0.9094380100842928,
"eval_sts-test_pearson_manhattan": 0.9095661408662257,
"eval_sts-test_pearson_max": 0.9095661408662257,
"eval_sts-test_spearman_cosine": 0.905583034917972,
"eval_sts-test_spearman_dot": 0.8658094563311378,
"eval_sts-test_spearman_euclidean": 0.906560626223067,
"eval_sts-test_spearman_manhattan": 0.906644400584392,
"eval_sts-test_spearman_max": 0.906644400584392,
"step": 210
},
{
"epoch": 0.25953259532595324,
"grad_norm": 4.170388221740723,
"learning_rate": 1.9941379571543597e-05,
"loss": 0.2407,
"step": 211
},
{
"epoch": 0.2607626076260763,
"grad_norm": 4.218827247619629,
"learning_rate": 1.9923457466060637e-05,
"loss": 0.212,
"step": 212
},
{
"epoch": 0.26199261992619927,
"grad_norm": 3.6592209339141846,
"learning_rate": 1.9903158728173206e-05,
"loss": 0.1361,
"step": 213
},
{
"epoch": 0.26322263222632225,
"grad_norm": 4.208631992340088,
"learning_rate": 1.9880488219356086e-05,
"loss": 0.2356,
"step": 214
},
{
"epoch": 0.2644526445264453,
"grad_norm": 2.9232637882232666,
"learning_rate": 1.9855451369114677e-05,
"loss": 0.1059,
"step": 215
},
{
"epoch": 0.2656826568265683,
"grad_norm": 4.299160480499268,
"learning_rate": 1.9828054173684646e-05,
"loss": 0.2501,
"step": 216
},
{
"epoch": 0.26691266912669126,
"grad_norm": 4.013469219207764,
"learning_rate": 1.9798303194595846e-05,
"loss": 0.1817,
"step": 217
},
{
"epoch": 0.26814268142681424,
"grad_norm": 3.691553831100464,
"learning_rate": 1.976620555710087e-05,
"loss": 0.2022,
"step": 218
},
{
"epoch": 0.2693726937269373,
"grad_norm": 4.433103561401367,
"learning_rate": 1.973176894846855e-05,
"loss": 0.2235,
"step": 219
},
{
"epoch": 0.27060270602706027,
"grad_norm": 4.862768173217773,
"learning_rate": 1.9695001616142916e-05,
"loss": 0.2437,
"step": 220
},
{
"epoch": 0.27183271832718325,
"grad_norm": 3.9157614707946777,
"learning_rate": 1.965591236576794e-05,
"loss": 0.1859,
"step": 221
},
{
"epoch": 0.2730627306273063,
"grad_norm": 4.705247402191162,
"learning_rate": 1.9614510559078626e-05,
"loss": 0.2167,
"step": 222
},
{
"epoch": 0.2742927429274293,
"grad_norm": 3.890500068664551,
"learning_rate": 1.95708061116589e-05,
"loss": 0.1495,
"step": 223
},
{
"epoch": 0.27552275522755226,
"grad_norm": 4.393867492675781,
"learning_rate": 1.9524809490566878e-05,
"loss": 0.2876,
"step": 224
},
{
"epoch": 0.2767527675276753,
"grad_norm": 3.782416582107544,
"learning_rate": 1.9476531711828027e-05,
"loss": 0.1842,
"step": 225
},
{
"epoch": 0.2779827798277983,
"grad_norm": 3.32236647605896,
"learning_rate": 1.942598433779687e-05,
"loss": 0.144,
"step": 226
},
{
"epoch": 0.27921279212792127,
"grad_norm": 3.9284870624542236,
"learning_rate": 1.9373179474387858e-05,
"loss": 0.1571,
"step": 227
},
{
"epoch": 0.28044280442804426,
"grad_norm": 3.847404956817627,
"learning_rate": 1.9318129768176033e-05,
"loss": 0.209,
"step": 228
},
{
"epoch": 0.2816728167281673,
"grad_norm": 4.21238899230957,
"learning_rate": 1.926084840336821e-05,
"loss": 0.2075,
"step": 229
},
{
"epoch": 0.2829028290282903,
"grad_norm": 4.167908191680908,
"learning_rate": 1.9201349098645433e-05,
"loss": 0.1722,
"step": 230
},
{
"epoch": 0.28413284132841327,
"grad_norm": 3.7701351642608643,
"learning_rate": 1.9139646103877378e-05,
"loss": 0.1464,
"step": 231
},
{
"epoch": 0.28413284132841327,
"eval_loss": 0.10392418503761292,
"eval_runtime": 54.7341,
"eval_samples_per_second": 27.515,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8822954324429473,
"eval_sts-test_pearson_dot": 0.8642863367586305,
"eval_sts-test_pearson_euclidean": 0.9122889538029727,
"eval_sts-test_pearson_manhattan": 0.912099304650421,
"eval_sts-test_pearson_max": 0.9122889538029727,
"eval_sts-test_spearman_cosine": 0.9087387596175093,
"eval_sts-test_spearman_dot": 0.8704923178256567,
"eval_sts-test_spearman_euclidean": 0.9097842833373965,
"eval_sts-test_spearman_manhattan": 0.9095372563745162,
"eval_sts-test_spearman_max": 0.9097842833373965,
"step": 231
},
{
"epoch": 0.2853628536285363,
"grad_norm": 4.548703193664551,
"learning_rate": 1.9075754196709574e-05,
"loss": 0.2675,
"step": 232
},
{
"epoch": 0.2865928659286593,
"grad_norm": 5.041469573974609,
"learning_rate": 1.900968867902419e-05,
"loss": 0.2585,
"step": 233
},
{
"epoch": 0.2878228782287823,
"grad_norm": 3.0036237239837646,
"learning_rate": 1.894146537327533e-05,
"loss": 0.134,
"step": 234
},
{
"epoch": 0.2890528905289053,
"grad_norm": 3.6082603931427,
"learning_rate": 1.8871100618699553e-05,
"loss": 0.1765,
"step": 235
},
{
"epoch": 0.2902829028290283,
"grad_norm": 3.8336241245269775,
"learning_rate": 1.8798611267402745e-05,
"loss": 0.1826,
"step": 236
},
{
"epoch": 0.2915129151291513,
"grad_norm": 4.307932376861572,
"learning_rate": 1.872401468032406e-05,
"loss": 0.222,
"step": 237
},
{
"epoch": 0.29274292742927427,
"grad_norm": 3.153963088989258,
"learning_rate": 1.864732872307804e-05,
"loss": 0.134,
"step": 238
},
{
"epoch": 0.2939729397293973,
"grad_norm": 4.044833660125732,
"learning_rate": 1.8568571761675893e-05,
"loss": 0.1902,
"step": 239
},
{
"epoch": 0.2952029520295203,
"grad_norm": 4.640310287475586,
"learning_rate": 1.8487762658126872e-05,
"loss": 0.2461,
"step": 240
},
{
"epoch": 0.2964329643296433,
"grad_norm": 4.932340145111084,
"learning_rate": 1.8404920765920898e-05,
"loss": 0.3094,
"step": 241
},
{
"epoch": 0.2976629766297663,
"grad_norm": 4.0233917236328125,
"learning_rate": 1.8320065925393468e-05,
"loss": 0.2252,
"step": 242
},
{
"epoch": 0.2988929889298893,
"grad_norm": 4.369536399841309,
"learning_rate": 1.8233218458973984e-05,
"loss": 0.2466,
"step": 243
},
{
"epoch": 0.3001230012300123,
"grad_norm": 3.6295106410980225,
"learning_rate": 1.814439916631857e-05,
"loss": 0.139,
"step": 244
},
{
"epoch": 0.3013530135301353,
"grad_norm": 3.705105781555176,
"learning_rate": 1.8053629319328662e-05,
"loss": 0.154,
"step": 245
},
{
"epoch": 0.3025830258302583,
"grad_norm": 3.7480130195617676,
"learning_rate": 1.796093065705644e-05,
"loss": 0.1979,
"step": 246
},
{
"epoch": 0.3038130381303813,
"grad_norm": 2.5885541439056396,
"learning_rate": 1.786632538049842e-05,
"loss": 0.1121,
"step": 247
},
{
"epoch": 0.3050430504305043,
"grad_norm": 3.3691048622131348,
"learning_rate": 1.7769836147278385e-05,
"loss": 0.1361,
"step": 248
},
{
"epoch": 0.3062730627306273,
"grad_norm": 4.20883321762085,
"learning_rate": 1.7671486066220965e-05,
"loss": 0.2492,
"step": 249
},
{
"epoch": 0.3075030750307503,
"grad_norm": 3.8119523525238037,
"learning_rate": 1.757129869181718e-05,
"loss": 0.1903,
"step": 250
},
{
"epoch": 0.3087330873308733,
"grad_norm": 4.464923858642578,
"learning_rate": 1.746929801858317e-05,
"loss": 0.2333,
"step": 251
},
{
"epoch": 0.30996309963099633,
"grad_norm": 4.029540061950684,
"learning_rate": 1.736550847531366e-05,
"loss": 0.1805,
"step": 252
},
{
"epoch": 0.30996309963099633,
"eval_loss": 0.10298814624547958,
"eval_runtime": 54.69,
"eval_samples_per_second": 27.537,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.881635556166377,
"eval_sts-test_pearson_dot": 0.862389303076424,
"eval_sts-test_pearson_euclidean": 0.9125260227425505,
"eval_sts-test_pearson_manhattan": 0.9128421094636647,
"eval_sts-test_pearson_max": 0.9128421094636647,
"eval_sts-test_spearman_cosine": 0.9098964747497047,
"eval_sts-test_spearman_dot": 0.8698043119330254,
"eval_sts-test_spearman_euclidean": 0.9103645729438589,
"eval_sts-test_spearman_manhattan": 0.9110424650514156,
"eval_sts-test_spearman_max": 0.9110424650514156,
"step": 252
},
{
"epoch": 0.3111931119311193,
"grad_norm": 3.990410089492798,
"learning_rate": 1.725995491923131e-05,
"loss": 0.1929,
"step": 253
},
{
"epoch": 0.3124231242312423,
"grad_norm": 3.7284581661224365,
"learning_rate": 1.7152662630033506e-05,
"loss": 0.1424,
"step": 254
},
{
"epoch": 0.31365313653136534,
"grad_norm": 3.8791370391845703,
"learning_rate": 1.7043657303837965e-05,
"loss": 0.2318,
"step": 255
},
{
"epoch": 0.3148831488314883,
"grad_norm": 3.4804205894470215,
"learning_rate": 1.693296504702862e-05,
"loss": 0.1524,
"step": 256
},
{
"epoch": 0.3161131611316113,
"grad_norm": 3.573451519012451,
"learning_rate": 1.682061237000322e-05,
"loss": 0.2195,
"step": 257
},
{
"epoch": 0.3173431734317343,
"grad_norm": 3.5766184329986572,
"learning_rate": 1.6706626180824185e-05,
"loss": 0.1338,
"step": 258
},
{
"epoch": 0.31857318573185733,
"grad_norm": 4.488210201263428,
"learning_rate": 1.659103377877423e-05,
"loss": 0.2543,
"step": 259
},
{
"epoch": 0.3198031980319803,
"grad_norm": 4.0144147872924805,
"learning_rate": 1.647386284781828e-05,
"loss": 0.202,
"step": 260
},
{
"epoch": 0.3210332103321033,
"grad_norm": 3.4031426906585693,
"learning_rate": 1.6355141449973254e-05,
"loss": 0.1489,
"step": 261
},
{
"epoch": 0.32226322263222634,
"grad_norm": 3.8359596729278564,
"learning_rate": 1.6234898018587336e-05,
"loss": 0.1937,
"step": 262
},
{
"epoch": 0.3234932349323493,
"grad_norm": 4.457846641540527,
"learning_rate": 1.6113161351530257e-05,
"loss": 0.2334,
"step": 263
},
{
"epoch": 0.3247232472324723,
"grad_norm": 4.167722702026367,
"learning_rate": 1.598996060429634e-05,
"loss": 0.1942,
"step": 264
},
{
"epoch": 0.32595325953259535,
"grad_norm": 4.352579116821289,
"learning_rate": 1.586532528302183e-05,
"loss": 0.2013,
"step": 265
},
{
"epoch": 0.32718327183271834,
"grad_norm": 5.293665409088135,
"learning_rate": 1.5739285237418323e-05,
"loss": 0.2954,
"step": 266
},
{
"epoch": 0.3284132841328413,
"grad_norm": 3.7269585132598877,
"learning_rate": 1.5611870653623826e-05,
"loss": 0.188,
"step": 267
},
{
"epoch": 0.3296432964329643,
"grad_norm": 3.8485231399536133,
"learning_rate": 1.548311204697331e-05,
"loss": 0.1688,
"step": 268
},
{
"epoch": 0.33087330873308735,
"grad_norm": 3.183656692504883,
"learning_rate": 1.5353040254690396e-05,
"loss": 0.1415,
"step": 269
},
{
"epoch": 0.33210332103321033,
"grad_norm": 4.292448997497559,
"learning_rate": 1.5221686428501929e-05,
"loss": 0.2249,
"step": 270
},
{
"epoch": 0.3333333333333333,
"grad_norm": 4.776716232299805,
"learning_rate": 1.508908202717729e-05,
"loss": 0.2606,
"step": 271
},
{
"epoch": 0.33456334563345635,
"grad_norm": 4.753313064575195,
"learning_rate": 1.4955258808994093e-05,
"loss": 0.2559,
"step": 272
},
{
"epoch": 0.33579335793357934,
"grad_norm": 4.271998882293701,
"learning_rate": 1.482024882413222e-05,
"loss": 0.2673,
"step": 273
},
{
"epoch": 0.33579335793357934,
"eval_loss": 0.10389706492424011,
"eval_runtime": 54.7165,
"eval_samples_per_second": 27.524,
"eval_steps_per_second": 0.219,
"eval_sts-test_pearson_cosine": 0.8802537579023317,
"eval_sts-test_pearson_dot": 0.8611279643446735,
"eval_sts-test_pearson_euclidean": 0.9104828974356078,
"eval_sts-test_pearson_manhattan": 0.9112003024801107,
"eval_sts-test_pearson_max": 0.9112003024801107,
"eval_sts-test_spearman_cosine": 0.9077587069930375,
"eval_sts-test_spearman_dot": 0.8683557106354935,
"eval_sts-test_spearman_euclidean": 0.908703271867226,
"eval_sts-test_spearman_manhattan": 0.9088831719380195,
"eval_sts-test_spearman_max": 0.9088831719380195,
"step": 273
},
{
"epoch": 0.3370233702337023,
"grad_norm": 3.5790419578552246,
"learning_rate": 1.4684084406997903e-05,
"loss": 0.1618,
"step": 274
},
{
"epoch": 0.33825338253382536,
"grad_norm": 4.819033145904541,
"learning_rate": 1.4546798168479756e-05,
"loss": 0.2602,
"step": 275
},
{
"epoch": 0.33948339483394835,
"grad_norm": 4.567826747894287,
"learning_rate": 1.4408422988138585e-05,
"loss": 0.2339,
"step": 276
},
{
"epoch": 0.34071340713407133,
"grad_norm": 4.182609558105469,
"learning_rate": 1.4268992006332847e-05,
"loss": 0.1843,
"step": 277
},
{
"epoch": 0.3419434194341943,
"grad_norm": 3.4522156715393066,
"learning_rate": 1.412853861628166e-05,
"loss": 0.133,
"step": 278
},
{
"epoch": 0.34317343173431736,
"grad_norm": 4.6532301902771,
"learning_rate": 1.3987096456067236e-05,
"loss": 0.2345,
"step": 279
},
{
"epoch": 0.34440344403444034,
"grad_norm": 4.240933418273926,
"learning_rate": 1.3844699400578696e-05,
"loss": 0.2808,
"step": 280
},
{
"epoch": 0.3456334563345633,
"grad_norm": 3.000117063522339,
"learning_rate": 1.3701381553399144e-05,
"loss": 0.1044,
"step": 281
},
{
"epoch": 0.34686346863468637,
"grad_norm": 3.7988216876983643,
"learning_rate": 1.3557177238637987e-05,
"loss": 0.1622,
"step": 282
},
{
"epoch": 0.34809348093480935,
"grad_norm": 3.2597107887268066,
"learning_rate": 1.3412120992710425e-05,
"loss": 0.1303,
"step": 283
},
{
"epoch": 0.34932349323493234,
"grad_norm": 3.2426445484161377,
"learning_rate": 1.3266247556066122e-05,
"loss": 0.1453,
"step": 284
},
{
"epoch": 0.3505535055350554,
"grad_norm": 4.482458114624023,
"learning_rate": 1.3119591864868979e-05,
"loss": 0.237,
"step": 285
},
{
"epoch": 0.35178351783517836,
"grad_norm": 4.062747478485107,
"learning_rate": 1.2972189042630044e-05,
"loss": 0.1726,
"step": 286
},
{
"epoch": 0.35301353013530135,
"grad_norm": 3.9885880947113037,
"learning_rate": 1.2824074391795571e-05,
"loss": 0.2195,
"step": 287
},
{
"epoch": 0.35424354243542433,
"grad_norm": 5.205960750579834,
"learning_rate": 1.2675283385292212e-05,
"loss": 0.3016,
"step": 288
},
{
"epoch": 0.35547355473554737,
"grad_norm": 3.2820823192596436,
"learning_rate": 1.252585165803135e-05,
"loss": 0.1626,
"step": 289
},
{
"epoch": 0.35670356703567035,
"grad_norm": 4.133265495300293,
"learning_rate": 1.2375814998374714e-05,
"loss": 0.1902,
"step": 290
},
{
"epoch": 0.35793357933579334,
"grad_norm": 3.349637746810913,
"learning_rate": 1.2225209339563144e-05,
"loss": 0.1387,
"step": 291
},
{
"epoch": 0.3591635916359164,
"grad_norm": 2.7458724975585938,
"learning_rate": 1.2074070751110753e-05,
"loss": 0.1047,
"step": 292
},
{
"epoch": 0.36039360393603936,
"grad_norm": 3.7697091102600098,
"learning_rate": 1.1922435430166372e-05,
"loss": 0.1954,
"step": 293
},
{
"epoch": 0.36162361623616235,
"grad_norm": 4.1529622077941895,
"learning_rate": 1.1770339692844484e-05,
"loss": 0.2089,
"step": 294
},
{
"epoch": 0.36162361623616235,
"eval_loss": 0.10294178128242493,
"eval_runtime": 54.5996,
"eval_samples_per_second": 27.583,
"eval_steps_per_second": 0.22,
"eval_sts-test_pearson_cosine": 0.8803510241742277,
"eval_sts-test_pearson_dot": 0.8614562911474479,
"eval_sts-test_pearson_euclidean": 0.9105274765595701,
"eval_sts-test_pearson_manhattan": 0.9112776803683604,
"eval_sts-test_pearson_max": 0.9112776803683604,
"eval_sts-test_spearman_cosine": 0.9082726304788564,
"eval_sts-test_spearman_dot": 0.8687116203836315,
"eval_sts-test_spearman_euclidean": 0.9088194013905367,
"eval_sts-test_spearman_manhattan": 0.9091452800759889,
"eval_sts-test_spearman_max": 0.9091452800759889,
"step": 294
},
{
"epoch": 0.3628536285362854,
"grad_norm": 3.3324942588806152,
"learning_rate": 1.161781996552765e-05,
"loss": 0.1485,
"step": 295
},
{
"epoch": 0.3640836408364084,
"grad_norm": 3.477254867553711,
"learning_rate": 1.1464912776142494e-05,
"loss": 0.1724,
"step": 296
},
{
"epoch": 0.36531365313653136,
"grad_norm": 3.933436393737793,
"learning_rate": 1.1311654745411424e-05,
"loss": 0.2017,
"step": 297
},
{
"epoch": 0.36654366543665434,
"grad_norm": 3.6212170124053955,
"learning_rate": 1.115808257808209e-05,
"loss": 0.1591,
"step": 298
},
{
"epoch": 0.3677736777367774,
"grad_norm": 4.0765700340271,
"learning_rate": 1.1004233054136726e-05,
"loss": 0.2396,
"step": 299
},
{
"epoch": 0.36900369003690037,
"grad_norm": 3.589646816253662,
"learning_rate": 1.0850143019983475e-05,
"loss": 0.1395,
"step": 300
},
{
"epoch": 0.37023370233702335,
"grad_norm": 3.7769243717193604,
"learning_rate": 1.0695849379631816e-05,
"loss": 0.1806,
"step": 301
},
{
"epoch": 0.3714637146371464,
"grad_norm": 3.4720847606658936,
"learning_rate": 1.0541389085854177e-05,
"loss": 0.1882,
"step": 302
},
{
"epoch": 0.3726937269372694,
"grad_norm": 2.9006810188293457,
"learning_rate": 1.038679913133589e-05,
"loss": 0.1188,
"step": 303
},
{
"epoch": 0.37392373923739236,
"grad_norm": 3.7660434246063232,
"learning_rate": 1.023211653981556e-05,
"loss": 0.1564,
"step": 304
},
{
"epoch": 0.3751537515375154,
"grad_norm": 5.082170486450195,
"learning_rate": 1.0077378357218023e-05,
"loss": 0.313,
"step": 305
},
{
"epoch": 0.3763837638376384,
"grad_norm": 3.5429434776306152,
"learning_rate": 9.922621642781982e-06,
"loss": 0.1455,
"step": 306
},
{
"epoch": 0.37761377613776137,
"grad_norm": 3.1348257064819336,
"learning_rate": 9.767883460184447e-06,
"loss": 0.1535,
"step": 307
},
{
"epoch": 0.37884378843788435,
"grad_norm": 2.881880521774292,
"learning_rate": 9.613200868664112e-06,
"loss": 0.099,
"step": 308
},
{
"epoch": 0.3800738007380074,
"grad_norm": 3.5104594230651855,
"learning_rate": 9.458610914145826e-06,
"loss": 0.1733,
"step": 309
},
{
"epoch": 0.3813038130381304,
"grad_norm": 3.9202194213867188,
"learning_rate": 9.304150620368189e-06,
"loss": 0.1891,
"step": 310
},
{
"epoch": 0.38253382533825336,
"grad_norm": 3.655240297317505,
"learning_rate": 9.149856980016529e-06,
"loss": 0.2128,
"step": 311
},
{
"epoch": 0.3837638376383764,
"grad_norm": 3.766303062438965,
"learning_rate": 8.995766945863278e-06,
"loss": 0.2042,
"step": 312
},
{
"epoch": 0.3849938499384994,
"grad_norm": 3.8122780323028564,
"learning_rate": 8.841917421917913e-06,
"loss": 0.203,
"step": 313
},
{
"epoch": 0.3862238622386224,
"grad_norm": 4.085626602172852,
"learning_rate": 8.688345254588579e-06,
"loss": 0.2249,
"step": 314
},
{
"epoch": 0.3874538745387454,
"grad_norm": 3.744234323501587,
"learning_rate": 8.53508722385751e-06,
"loss": 0.1597,
"step": 315
},
{
"epoch": 0.3874538745387454,
"eval_loss": 0.10140395164489746,
"eval_runtime": 54.6293,
"eval_samples_per_second": 27.568,
"eval_steps_per_second": 0.22,
"eval_sts-test_pearson_cosine": 0.8788980244871143,
"eval_sts-test_pearson_dot": 0.8616979928109831,
"eval_sts-test_pearson_euclidean": 0.9090675882181183,
"eval_sts-test_pearson_manhattan": 0.909351159725011,
"eval_sts-test_pearson_max": 0.909351159725011,
"eval_sts-test_spearman_cosine": 0.9074493862743003,
"eval_sts-test_spearman_dot": 0.8701774479505067,
"eval_sts-test_spearman_euclidean": 0.9075559837789346,
"eval_sts-test_spearman_manhattan": 0.9076191725600193,
"eval_sts-test_spearman_max": 0.9076191725600193,
"step": 315
},
{
"epoch": 0.3886838868388684,
"grad_norm": 3.4346373081207275,
"learning_rate": 8.382180034472353e-06,
"loss": 0.1358,
"step": 316
},
{
"epoch": 0.3899138991389914,
"grad_norm": 3.872002124786377,
"learning_rate": 8.229660307155518e-06,
"loss": 0.207,
"step": 317
},
{
"epoch": 0.39114391143911437,
"grad_norm": 3.4921915531158447,
"learning_rate": 8.077564569833633e-06,
"loss": 0.193,
"step": 318
},
{
"epoch": 0.3923739237392374,
"grad_norm": 3.2774128913879395,
"learning_rate": 7.92592924888925e-06,
"loss": 0.1141,
"step": 319
},
{
"epoch": 0.3936039360393604,
"grad_norm": 4.900540351867676,
"learning_rate": 7.774790660436857e-06,
"loss": 0.2835,
"step": 320
},
{
"epoch": 0.3948339483394834,
"grad_norm": 4.073228359222412,
"learning_rate": 7.6241850016252915e-06,
"loss": 0.2589,
"step": 321
},
{
"epoch": 0.3960639606396064,
"grad_norm": 2.549339532852173,
"learning_rate": 7.4741483419686475e-06,
"loss": 0.088,
"step": 322
},
{
"epoch": 0.3972939729397294,
"grad_norm": 4.411525726318359,
"learning_rate": 7.324716614707792e-06,
"loss": 0.1675,
"step": 323
},
{
"epoch": 0.3985239852398524,
"grad_norm": 3.336052656173706,
"learning_rate": 7.175925608204428e-06,
"loss": 0.1525,
"step": 324
},
{
"epoch": 0.3997539975399754,
"grad_norm": 3.2689311504364014,
"learning_rate": 7.0278109573699574e-06,
"loss": 0.1401,
"step": 325
},
{
"epoch": 0.4009840098400984,
"grad_norm": 3.8623855113983154,
"learning_rate": 6.880408135131022e-06,
"loss": 0.2109,
"step": 326
},
{
"epoch": 0.4022140221402214,
"grad_norm": 3.1453464031219482,
"learning_rate": 6.733752443933879e-06,
"loss": 0.1382,
"step": 327
},
{
"epoch": 0.4034440344403444,
"grad_norm": 3.09243106842041,
"learning_rate": 6.587879007289576e-06,
"loss": 0.1724,
"step": 328
}
],
"logging_steps": 1,
"max_steps": 813,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 82,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 320,
"trial_name": null,
"trial_params": null
}