{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.995910949568378,
  "eval_steps": 400,
  "global_step": 137,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03634711494775102,
      "grad_norm": 292.3499633268165,
      "learning_rate": 2.857142857142857e-07,
      "logits/chosen": -9.351981163024902,
      "logits/rejected": -9.40494441986084,
      "logps/chosen": -1.8519542217254639,
      "logps/rejected": -1.6600620746612549,
      "loss": 8.7269,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -18.519542694091797,
      "rewards/margins": -1.9189239740371704,
      "rewards/rejected": -16.60062026977539,
      "step": 5
    },
    {
      "epoch": 0.07269422989550205,
      "grad_norm": 176.49993734667456,
      "learning_rate": 5.714285714285714e-07,
      "logits/chosen": -8.979061126708984,
      "logits/rejected": -9.256832122802734,
      "logps/chosen": -1.6596624851226807,
      "logps/rejected": -1.5709294080734253,
      "loss": 7.2313,
      "rewards/accuracies": 0.35624998807907104,
      "rewards/chosen": -16.59662437438965,
      "rewards/margins": -0.8873310089111328,
      "rewards/rejected": -15.709294319152832,
      "step": 10
    },
    {
      "epoch": 0.10904134484325306,
      "grad_norm": 107.47412480741363,
      "learning_rate": 7.998695344323425e-07,
      "logits/chosen": -8.614570617675781,
      "logits/rejected": -8.825468063354492,
      "logps/chosen": -1.4021928310394287,
      "logps/rejected": -1.2052686214447021,
      "loss": 6.7486,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": -14.021929740905762,
      "rewards/margins": -1.969242811203003,
      "rewards/rejected": -12.052685737609863,
      "step": 15
    },
    {
      "epoch": 0.1453884597910041,
      "grad_norm": 82.01335104879803,
      "learning_rate": 7.953121695121394e-07,
      "logits/chosen": -7.269529819488525,
      "logits/rejected": -7.306704044342041,
      "logps/chosen": -1.0399733781814575,
      "logps/rejected": -1.0558375120162964,
      "loss": 5.8003,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -10.39973258972168,
      "rewards/margins": 0.15864244103431702,
      "rewards/rejected": -10.558377265930176,
      "step": 20
    },
    {
      "epoch": 0.18173557473875512,
      "grad_norm": 72.82132091492836,
      "learning_rate": 7.843163833184991e-07,
      "logits/chosen": -6.520535945892334,
      "logits/rejected": -6.559422969818115,
      "logps/chosen": -0.8438193202018738,
      "logps/rejected": -0.8522235751152039,
      "loss": 5.0458,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -8.438192367553711,
      "rewards/margins": 0.08404362201690674,
      "rewards/rejected": -8.522235870361328,
      "step": 25
    },
    {
      "epoch": 0.21808268968650613,
      "grad_norm": 69.03727598335514,
      "learning_rate": 7.670612634414511e-07,
      "logits/chosen": -5.9485015869140625,
      "logits/rejected": -5.740406513214111,
      "logps/chosen": -0.8595390319824219,
      "logps/rejected": -0.8615515828132629,
      "loss": 4.7981,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -8.595389366149902,
      "rewards/margins": 0.020125534385442734,
      "rewards/rejected": -8.615514755249023,
      "step": 30
    },
    {
      "epoch": 0.25442980463425713,
      "grad_norm": 71.78320222375913,
      "learning_rate": 7.438278427948805e-07,
      "logits/chosen": -6.031136989593506,
      "logits/rejected": -5.746572494506836,
      "logps/chosen": -0.7891378998756409,
      "logps/rejected": -0.874317467212677,
      "loss": 4.7484,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -7.891378879547119,
      "rewards/margins": 0.8517962694168091,
      "rewards/rejected": -8.743175506591797,
      "step": 35
    },
    {
      "epoch": 0.2907769195820082,
      "grad_norm": 72.82722028196132,
      "learning_rate": 7.149945224533862e-07,
      "logits/chosen": -5.47674560546875,
      "logits/rejected": -5.537765979766846,
      "logps/chosen": -0.7930837869644165,
      "logps/rejected": -0.904737651348114,
      "loss": 4.6279,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -7.930837154388428,
      "rewards/margins": 1.1165391206741333,
      "rewards/rejected": -9.04737663269043,
      "step": 40
    },
    {
      "epoch": 0.3271240345297592,
      "grad_norm": 61.23739520942703,
      "learning_rate": 6.810309086608129e-07,
      "logits/chosen": -5.746423721313477,
      "logits/rejected": -5.778194427490234,
      "logps/chosen": -0.7503607273101807,
      "logps/rejected": -0.8812963366508484,
      "loss": 4.7314,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -7.503607749938965,
      "rewards/margins": 1.3093562126159668,
      "rewards/rejected": -8.81296443939209,
      "step": 45
    },
    {
      "epoch": 0.36347114947751025,
      "grad_norm": 68.26509051330353,
      "learning_rate": 6.424901643866552e-07,
      "logits/chosen": -5.5795392990112305,
      "logits/rejected": -5.432155609130859,
      "logps/chosen": -0.816428005695343,
      "logps/rejected": -0.9023356437683105,
      "loss": 4.7166,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -8.16427993774414,
      "rewards/margins": 0.8590763211250305,
      "rewards/rejected": -9.023356437683105,
      "step": 50
    },
    {
      "epoch": 0.39981826442526125,
      "grad_norm": 71.6111666575235,
      "learning_rate": 6e-07,
      "logits/chosen": -5.5865983963012695,
      "logits/rejected": -5.635308265686035,
      "logps/chosen": -0.7537165880203247,
      "logps/rejected": -0.9402713775634766,
      "loss": 4.4952,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -7.537165641784668,
      "rewards/margins": 1.8655481338500977,
      "rewards/rejected": -9.402713775634766,
      "step": 55
    },
    {
      "epoch": 0.43616537937301225,
      "grad_norm": 65.19861764331375,
      "learning_rate": 5.542524497952543e-07,
      "logits/chosen": -5.944064140319824,
      "logits/rejected": -5.957856178283691,
      "logps/chosen": -0.7788850665092468,
      "logps/rejected": -0.9255874752998352,
      "loss": 4.3522,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -7.7888503074646,
      "rewards/margins": 1.4670238494873047,
      "rewards/rejected": -9.255874633789062,
      "step": 60
    },
    {
      "epoch": 0.4725124943207633,
      "grad_norm": 67.86424317787042,
      "learning_rate": 5.059926008786647e-07,
      "logits/chosen": -5.869328498840332,
      "logits/rejected": -5.912378787994385,
      "logps/chosen": -0.7861538529396057,
      "logps/rejected": -0.944945216178894,
      "loss": 4.3518,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -7.861537933349609,
      "rewards/margins": 1.587913990020752,
      "rewards/rejected": -9.449451446533203,
      "step": 65
    },
    {
      "epoch": 0.5088596092685143,
      "grad_norm": 70.15593128180441,
      "learning_rate": 4.5600645798745166e-07,
      "logits/chosen": -6.250380039215088,
      "logits/rejected": -6.102661609649658,
      "logps/chosen": -0.7886101603507996,
      "logps/rejected": -0.9173639416694641,
      "loss": 4.2954,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -7.886102199554443,
      "rewards/margins": 1.2875380516052246,
      "rewards/rejected": -9.173639297485352,
      "step": 70
    },
    {
      "epoch": 0.5452067242162654,
      "grad_norm": 72.06139720995746,
      "learning_rate": 4.051081418863895e-07,
      "logits/chosen": -6.077569484710693,
      "logits/rejected": -5.909134864807129,
      "logps/chosen": -0.8054354786872864,
      "logps/rejected": -1.0184094905853271,
      "loss": 4.1856,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -8.054354667663574,
      "rewards/margins": 2.129739284515381,
      "rewards/rejected": -10.18409538269043,
      "step": 75
    },
    {
      "epoch": 0.5815538391640164,
      "grad_norm": 111.49055555981266,
      "learning_rate": 3.541266298406398e-07,
      "logits/chosen": -6.036823749542236,
      "logits/rejected": -5.949917793273926,
      "logps/chosen": -0.8323701024055481,
      "logps/rejected": -0.9793428182601929,
      "loss": 4.0504,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -8.323701858520508,
      "rewards/margins": 1.4697275161743164,
      "rewards/rejected": -9.793428421020508,
      "step": 80
    },
    {
      "epoch": 0.6179009541117674,
      "grad_norm": 81.14697844302252,
      "learning_rate": 3.0389225412181565e-07,
      "logits/chosen": -5.898052215576172,
      "logits/rejected": -5.901252746582031,
      "logps/chosen": -0.7811166644096375,
      "logps/rejected": -1.0059183835983276,
      "loss": 4.2063,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -7.811167240142822,
      "rewards/margins": 2.248016595840454,
      "rewards/rejected": -10.059184074401855,
      "step": 85
    },
    {
      "epoch": 0.6542480690595184,
      "grad_norm": 77.67069971551602,
      "learning_rate": 2.5522317844515273e-07,
      "logits/chosen": -6.16554069519043,
      "logits/rejected": -6.2015275955200195,
      "logps/chosen": -0.8136342167854309,
      "logps/rejected": -1.0194966793060303,
      "loss": 4.0071,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -8.136343002319336,
      "rewards/margins": 2.058624267578125,
      "rewards/rejected": -10.194966316223145,
      "step": 90
    },
    {
      "epoch": 0.6905951840072694,
      "grad_norm": 67.22921956049545,
      "learning_rate": 2.0891207259509476e-07,
      "logits/chosen": -6.2798662185668945,
      "logits/rejected": -6.4411468505859375,
      "logps/chosen": -0.780041515827179,
      "logps/rejected": -0.9418380856513977,
      "loss": 4.2225,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -7.8004150390625,
      "rewards/margins": 1.6179659366607666,
      "rewards/rejected": -9.418381690979004,
      "step": 95
    },
    {
      "epoch": 0.7269422989550205,
      "grad_norm": 71.88699575292867,
      "learning_rate": 1.6571320226872206e-07,
      "logits/chosen": -6.111327171325684,
      "logits/rejected": -5.933043003082275,
      "logps/chosen": -0.79816734790802,
      "logps/rejected": -1.0315134525299072,
      "loss": 4.1323,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -7.981672763824463,
      "rewards/margins": 2.3334617614746094,
      "rewards/rejected": -10.31513500213623,
      "step": 100
    },
    {
      "epoch": 0.7632894139027715,
      "grad_norm": 78.83415408435721,
      "learning_rate": 1.2633014440382787e-07,
      "logits/chosen": -6.445625305175781,
      "logits/rejected": -6.3378729820251465,
      "logps/chosen": -0.7882248163223267,
      "logps/rejected": -0.938172459602356,
      "loss": 4.2202,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -7.882249355316162,
      "rewards/margins": 1.4994752407073975,
      "rewards/rejected": -9.38172435760498,
      "step": 105
    },
    {
      "epoch": 0.7996365288505225,
      "grad_norm": 84.93158033781451,
      "learning_rate": 9.14043280712228e-08,
      "logits/chosen": -5.919665813446045,
      "logits/rejected": -5.9659295082092285,
      "logps/chosen": -0.807255744934082,
      "logps/rejected": -0.9430872797966003,
      "loss": 4.3819,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -8.07255744934082,
      "rewards/margins": 1.3583143949508667,
      "rewards/rejected": -9.430871963500977,
      "step": 110
    },
    {
      "epoch": 0.8359836437982735,
      "grad_norm": 81.4577593129811,
      "learning_rate": 6.150458756494239e-08,
      "logits/chosen": -5.945127964019775,
      "logits/rejected": -5.908052921295166,
      "logps/chosen": -0.8148506879806519,
      "logps/rejected": -0.9752057790756226,
      "loss": 4.1354,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -8.148507118225098,
      "rewards/margins": 1.6035501956939697,
      "rewards/rejected": -9.752057075500488,
      "step": 115
    },
    {
      "epoch": 0.8723307587460245,
      "grad_norm": 86.67335944638306,
      "learning_rate": 3.711789783843522e-08,
      "logits/chosen": -5.938599109649658,
      "logits/rejected": -6.1739959716796875,
      "logps/chosen": -0.818856418132782,
      "logps/rejected": -0.9494014978408813,
      "loss": 4.1957,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -8.18856430053711,
      "rewards/margins": 1.305450677871704,
      "rewards/rejected": -9.494014739990234,
      "step": 120
    },
    {
      "epoch": 0.9086778736937755,
      "grad_norm": 73.2996295516593,
      "learning_rate": 1.8641443178027784e-08,
      "logits/chosen": -5.637792110443115,
      "logits/rejected": -5.634159088134766,
      "logps/chosen": -0.7925742268562317,
      "logps/rejected": -1.0251243114471436,
      "loss": 4.0793,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -7.925743103027344,
      "rewards/margins": 2.325500011444092,
      "rewards/rejected": -10.251241683959961,
      "step": 125
    },
    {
      "epoch": 0.9450249886415266,
      "grad_norm": 75.69723178942408,
      "learning_rate": 6.376148290617145e-09,
      "logits/chosen": -5.835230350494385,
      "logits/rejected": -5.948345184326172,
      "logps/chosen": -0.820753276348114,
      "logps/rejected": -1.0133521556854248,
      "loss": 4.191,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -8.20753288269043,
      "rewards/margins": 1.9259881973266602,
      "rewards/rejected": -10.13352108001709,
      "step": 130
    },
    {
      "epoch": 0.9813721035892776,
      "grad_norm": 80.16820363004321,
      "learning_rate": 5.217771643080127e-10,
      "logits/chosen": -6.059390068054199,
      "logits/rejected": -6.196063041687012,
      "logps/chosen": -0.7539313435554504,
      "logps/rejected": -0.9142172932624817,
      "loss": 4.1238,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -7.539312839508057,
      "rewards/margins": 1.602860450744629,
      "rewards/rejected": -9.142173767089844,
      "step": 135
    },
    {
      "epoch": 0.995910949568378,
      "step": 137,
      "total_flos": 0.0,
      "train_loss": 4.762774154217574,
      "train_runtime": 2004.0235,
      "train_samples_per_second": 8.785,
      "train_steps_per_second": 0.068
    }
  ],
  "logging_steps": 5,
  "max_steps": 137,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}